Posted to commits@cassandra.apache.org by sl...@apache.org on 2016/11/30 09:49:54 UTC

[01/11] cassandra git commit: Remove pre-3.0 compatibility code for 4.0

Repository: cassandra
Updated Branches:
  refs/heads/trunk 3fabc3350 -> 4a2464192


http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/schema/LegacySchemaMigratorTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/schema/LegacySchemaMigratorTest.java b/test/unit/org/apache/cassandra/schema/LegacySchemaMigratorTest.java
deleted file mode 100644
index 239a90d..0000000
--- a/test/unit/org/apache/cassandra/schema/LegacySchemaMigratorTest.java
+++ /dev/null
@@ -1,845 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.schema;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.*;
-import java.util.stream.Collectors;
-
-import com.google.common.collect.ImmutableList;
-import org.junit.Test;
-
-import org.apache.cassandra.SchemaLoader;
-import org.apache.cassandra.config.CFMetaData;
-import org.apache.cassandra.config.ColumnDefinition;
-import org.apache.cassandra.config.Schema;
-import org.apache.cassandra.config.SchemaConstants;
-import org.apache.cassandra.cql3.CQLTester;
-import org.apache.cassandra.cql3.ColumnIdentifier;
-import org.apache.cassandra.cql3.FieldIdentifier;
-import org.apache.cassandra.cql3.functions.*;
-import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.rows.Row;
-import org.apache.cassandra.db.marshal.*;
-import org.apache.cassandra.index.TargetParser;
-import org.apache.cassandra.thrift.ThriftConversion;
-import org.apache.cassandra.utils.*;
-
-import static java.lang.String.format;
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertFalse;
-import static junit.framework.Assert.assertTrue;
-import static org.apache.cassandra.cql3.QueryProcessor.executeOnceInternal;
-import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
-import static org.apache.cassandra.utils.FBUtilities.json;
-
-@SuppressWarnings("deprecation")
-public class LegacySchemaMigratorTest
-{
-    private static final long TIMESTAMP = 1435908994000000L;
-
-    private static final String KEYSPACE_PREFIX = "LegacySchemaMigratorTest";
-
-    /*
-     * 1. Write a variety of different keyspaces/tables/types/functions in the legacy manner, using legacy schema tables
-     * 2. Run the migrator
-     * 3. Read all the keyspaces from the new schema tables
-     * 4. Make sure that we've read *exactly* the same set of keyspaces/tables/types/functions
-     * 5. Validate that the legacy schema tables are now empty
-     */
-    @Test
-    public void testMigrate() throws IOException
-    {
-        CQLTester.cleanupAndLeaveDirs();
-
-        Keyspaces expected = keyspacesToMigrate();
-
-        // write the keyspaces into the legacy tables
-        expected.forEach(LegacySchemaMigratorTest::legacySerializeKeyspace);
-
-        // run the migration
-        LegacySchemaMigrator.migrate();
-
-        // read back all the metadata from the new schema tables
-        Keyspaces actual = SchemaKeyspace.fetchNonSystemKeyspaces();
-
-        // need to load back CFMetaData of those tables (CFS instances will still be loaded)
-        loadLegacySchemaTables();
-
-        // verify that nothing's left in the old schema tables
-        for (CFMetaData table : LegacySchemaMigrator.LegacySchemaTables)
-        {
-            String query = format("SELECT * FROM %s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, table.cfName);
-            //noinspection ConstantConditions
-            assertTrue(executeOnceInternal(query).isEmpty());
-        }
-
-        // make sure that we've read *exactly* the same set of keyspaces/tables/types/functions
-        assertEquals(expected.diff(actual).toString(), expected, actual);
-
-        // check that the build status of all indexes has been updated to use the new
-        // format of index name: the index_name column of system.IndexInfo used to
-        // contain table_name.index_name. Now it should contain just the index_name.
-        expected.forEach(LegacySchemaMigratorTest::verifyIndexBuildStatus);
-    }
-
-    private static FieldIdentifier field(String field)
-    {
-        return FieldIdentifier.forQuoted(field);
-    }
-
-    private static void loadLegacySchemaTables()
-    {
-        KeyspaceMetadata systemKeyspace = Schema.instance.getKSMetaData(SchemaConstants.SYSTEM_KEYSPACE_NAME);
-
-        Tables systemTables = systemKeyspace.tables;
-        for (CFMetaData table : LegacySchemaMigrator.LegacySchemaTables)
-            systemTables = systemTables.with(table);
-
-        LegacySchemaMigrator.LegacySchemaTables.forEach(Schema.instance::load);
-
-        Schema.instance.setKeyspaceMetadata(systemKeyspace.withSwapped(systemTables));
-    }
-
-    private static Keyspaces keyspacesToMigrate()
-    {
-        Keyspaces.Builder keyspaces = Keyspaces.builder();
-
-        // A whole bucket of shorthand
-        String ks1 = KEYSPACE_PREFIX + "Keyspace1";
-        String ks2 = KEYSPACE_PREFIX + "Keyspace2";
-        String ks3 = KEYSPACE_PREFIX + "Keyspace3";
-        String ks4 = KEYSPACE_PREFIX + "Keyspace4";
-        String ks5 = KEYSPACE_PREFIX + "Keyspace5";
-        String ks6 = KEYSPACE_PREFIX + "Keyspace6";
-        String ks_rcs = KEYSPACE_PREFIX + "RowCacheSpace";
-        String ks_nocommit = KEYSPACE_PREFIX + "NoCommitlogSpace";
-        String ks_prsi = KEYSPACE_PREFIX + "PerRowSecondaryIndex";
-        String ks_cql = KEYSPACE_PREFIX + "cql_keyspace";
-
-        // Make it easy to test compaction
-        Map<String, String> compactionOptions = new HashMap<>();
-        compactionOptions.put("tombstone_compaction_interval", "1");
-
-        Map<String, String> leveledOptions = new HashMap<>();
-        leveledOptions.put("sstable_size_in_mb", "1");
-
-        keyspaces.add(KeyspaceMetadata.create(ks1,
-                                              KeyspaceParams.simple(1),
-                                              Tables.of(SchemaLoader.standardCFMD(ks1, "Standard1")
-                                                                    .compaction(CompactionParams.scts(compactionOptions)),
-                                                        SchemaLoader.standardCFMD(ks1, "StandardGCGS0").gcGraceSeconds(0),
-                                                        SchemaLoader.standardCFMD(ks1, "StandardLong1"),
-                                                        SchemaLoader.superCFMD(ks1, "Super1", LongType.instance),
-                                                        SchemaLoader.superCFMD(ks1, "Super2", UTF8Type.instance),
-                                                        SchemaLoader.superCFMD(ks1, "Super5", BytesType.instance),
-                                                        SchemaLoader.superCFMD(ks1, "Super6", LexicalUUIDType.instance, UTF8Type.instance),
-                                                        SchemaLoader.keysIndexCFMD(ks1, "Indexed1", true),
-                                                        SchemaLoader.keysIndexCFMD(ks1, "Indexed2", false),
-                                                        SchemaLoader.superCFMD(ks1, "SuperDirectGC", BytesType.instance)
-                                                                    .gcGraceSeconds(0),
-                                                        SchemaLoader.jdbcCFMD(ks1, "JdbcUtf8", UTF8Type.instance)
-                                                                    .addColumnDefinition(SchemaLoader.utf8Column(ks1, "JdbcUtf8")),
-                                                        SchemaLoader.jdbcCFMD(ks1, "JdbcLong", LongType.instance),
-                                                        SchemaLoader.jdbcCFMD(ks1, "JdbcBytes", BytesType.instance),
-                                                        SchemaLoader.jdbcCFMD(ks1, "JdbcAscii", AsciiType.instance),
-                                                        SchemaLoader.standardCFMD(ks1, "StandardLeveled")
-                                                                    .compaction(CompactionParams.lcs(leveledOptions)),
-                                                        SchemaLoader.standardCFMD(ks1, "legacyleveled")
-                                                                    .compaction(CompactionParams.lcs(leveledOptions)),
-                                                        SchemaLoader.standardCFMD(ks1, "StandardLowIndexInterval")
-                                                                    .minIndexInterval(8)
-                                                                    .maxIndexInterval(256)
-                                                                    .caching(CachingParams.CACHE_NOTHING))));
-
-        // Keyspace 2
-        keyspaces.add(KeyspaceMetadata.create(ks2,
-                                              KeyspaceParams.simple(1),
-                                              Tables.of(SchemaLoader.standardCFMD(ks2, "Standard1"),
-                                                        SchemaLoader.superCFMD(ks2, "Super3", BytesType.instance),
-                                                        SchemaLoader.superCFMD(ks2, "Super4", TimeUUIDType.instance),
-                                                        SchemaLoader.keysIndexCFMD(ks2, "Indexed1", true),
-                                                        SchemaLoader.compositeIndexCFMD(ks2, "Indexed2", true),
-                                                        SchemaLoader.compositeIndexCFMD(ks2, "Indexed3", true)
-                                                                    .gcGraceSeconds(0))));
-
-        // Keyspace 3
-        keyspaces.add(KeyspaceMetadata.create(ks3,
-                                              KeyspaceParams.simple(5),
-                                              Tables.of(SchemaLoader.standardCFMD(ks3, "Standard1"),
-                                                        SchemaLoader.keysIndexCFMD(ks3, "Indexed1", true))));
-
-        // Keyspace 4
-        keyspaces.add(KeyspaceMetadata.create(ks4,
-                                              KeyspaceParams.simple(3),
-                                              Tables.of(SchemaLoader.standardCFMD(ks4, "Standard1"),
-                                                        SchemaLoader.superCFMD(ks4, "Super3", BytesType.instance),
-                                                        SchemaLoader.superCFMD(ks4, "Super4", TimeUUIDType.instance),
-                                                        SchemaLoader.superCFMD(ks4, "Super5", TimeUUIDType.instance, BytesType.instance))));
-
-        // Keyspace 5
-        keyspaces.add(KeyspaceMetadata.create(ks5,
-                                              KeyspaceParams.simple(2),
-                                              Tables.of(SchemaLoader.standardCFMD(ks5, "Standard1"))));
-
-        // Keyspace 6
-        keyspaces.add(KeyspaceMetadata.create(ks6,
-                                              KeyspaceParams.simple(1),
-                                              Tables.of(SchemaLoader.keysIndexCFMD(ks6, "Indexed1", true))));
-
-        // RowCacheSpace
-        keyspaces.add(KeyspaceMetadata.create(ks_rcs,
-                                              KeyspaceParams.simple(1),
-                                              Tables.of(SchemaLoader.standardCFMD(ks_rcs, "CFWithoutCache")
-                                                                    .caching(CachingParams.CACHE_NOTHING),
-                                                        SchemaLoader.standardCFMD(ks_rcs, "CachedCF")
-                                                                    .caching(CachingParams.CACHE_EVERYTHING),
-                                                        SchemaLoader.standardCFMD(ks_rcs, "CachedIntCF")
-                                                                    .caching(new CachingParams(true, 100)))));
-
-        keyspaces.add(KeyspaceMetadata.create(ks_nocommit,
-                                              KeyspaceParams.simpleTransient(1),
-                                              Tables.of(SchemaLoader.standardCFMD(ks_nocommit, "Standard1"))));
-
-        // PerRowSecondaryIndexTest
-        keyspaces.add(KeyspaceMetadata.create(ks_prsi,
-                                              KeyspaceParams.simple(1),
-                                              Tables.of(SchemaLoader.perRowIndexedCFMD(ks_prsi, "Indexed1"))));
-
-        // CQLKeyspace
-        keyspaces.add(KeyspaceMetadata.create(ks_cql,
-                                              KeyspaceParams.simple(1),
-                                              Tables.of(CFMetaData.compile("CREATE TABLE table1 ("
-                                                                           + "k int PRIMARY KEY,"
-                                                                           + "v1 text,"
-                                                                           + "v2 int"
-                                                                           + ')', ks_cql),
-
-                                                        CFMetaData.compile("CREATE TABLE table2 ("
-                                                                           + "k text,"
-                                                                           + "c text,"
-                                                                           + "v text,"
-                                                                           + "PRIMARY KEY (k, c))", ks_cql),
-
-                                                        CFMetaData.compile("CREATE TABLE foo ("
-                                                                           + "bar text, "
-                                                                           + "baz text, "
-                                                                           + "qux text, "
-                                                                           + "PRIMARY KEY(bar, baz) ) "
-                                                                           + "WITH COMPACT STORAGE", ks_cql),
-
-                                                        CFMetaData.compile("CREATE TABLE compact_pkonly ("
-                                                                           + "k int, "
-                                                                           + "c int, "
-                                                                           + "PRIMARY KEY (k, c)) "
-                                                                           + "WITH COMPACT STORAGE",
-                                                                           ks_cql),
-
-                                                        CFMetaData.compile("CREATE TABLE foofoo ("
-                                                                           + "bar text, "
-                                                                           + "baz text, "
-                                                                           + "qux text, "
-                                                                           + "quz text, "
-                                                                           + "foo text, "
-                                                                           + "PRIMARY KEY((bar, baz), qux, quz) ) "
-                                                                           + "WITH COMPACT STORAGE", ks_cql))));
-
-        // NTS keyspace
-        keyspaces.add(KeyspaceMetadata.create("nts", KeyspaceParams.nts("dc1", 1, "dc2", 2)));
-
-        keyspaces.add(keyspaceWithDroppedCollections());
-        keyspaces.add(keyspaceWithTriggers());
-        keyspaces.add(keyspaceWithUDTs());
-        keyspaces.add(keyspaceWithUDFs());
-        keyspaces.add(keyspaceWithUDFsAndUDTs());
-        keyspaces.add(keyspaceWithUDAs());
-        keyspaces.add(keyspaceWithUDAsAndUDTs());
-
-        return keyspaces.build();
-    }
-
-    private static KeyspaceMetadata keyspaceWithDroppedCollections()
-    {
-        String keyspace = KEYSPACE_PREFIX + "DroppedCollections";
-
-        CFMetaData table =
-            CFMetaData.compile("CREATE TABLE dropped_columns ("
-                               + "foo text,"
-                               + "bar text,"
-                               + "map1 map<text, text>,"
-                               + "map2 map<int, int>,"
-                               + "set1 set<ascii>,"
-                               + "list1 list<blob>,"
-                               + "PRIMARY KEY ((foo), bar))",
-                               keyspace);
-
-        String[] collectionColumnNames = { "map1", "map2", "set1", "list1" };
-        for (String name : collectionColumnNames)
-        {
-            ColumnDefinition column = table.getColumnDefinition(bytes(name));
-            table.recordColumnDrop(column, FBUtilities.timestampMicros());
-            table.removeColumnDefinition(column);
-        }
-
-        return KeyspaceMetadata.create(keyspace, KeyspaceParams.simple(1), Tables.of(table));
-    }
-
-    private static KeyspaceMetadata keyspaceWithTriggers()
-    {
-        String keyspace = KEYSPACE_PREFIX + "Triggers";
-
-        Triggers.Builder triggers = Triggers.builder();
-        CFMetaData table = SchemaLoader.standardCFMD(keyspace, "WithTriggers");
-        for (int i = 0; i < 10; i++)
-            triggers.add(new TriggerMetadata("trigger" + i, "DummyTrigger" + i));
-        table.triggers(triggers.build());
-
-        return KeyspaceMetadata.create(keyspace, KeyspaceParams.simple(1), Tables.of(table));
-    }
-
-    private static KeyspaceMetadata keyspaceWithUDTs()
-    {
-        String keyspace = KEYSPACE_PREFIX + "UDTs";
-
-        UserType udt1 = new UserType(keyspace,
-                                     bytes("udt1"),
-                                     new ArrayList<FieldIdentifier>() {{ add(field("col1")); add(field("col2")); }},
-                                     new ArrayList<AbstractType<?>>() {{ add(UTF8Type.instance); add(Int32Type.instance); }},
-                                     true);
-
-        UserType udt2 = new UserType(keyspace,
-                                     bytes("udt2"),
-                                     new ArrayList<FieldIdentifier>() {{ add(field("col3")); add(field("col4")); }},
-                                     new ArrayList<AbstractType<?>>() {{ add(BytesType.instance); add(BooleanType.instance); }},
-                                     true);
-
-        UserType udt3 = new UserType(keyspace,
-                                     bytes("udt3"),
-                                     new ArrayList<FieldIdentifier>() {{ add(field("col5")); }},
-                                     new ArrayList<AbstractType<?>>() {{ add(AsciiType.instance); }},
-                                     true);
-
-        return KeyspaceMetadata.create(keyspace,
-                                       KeyspaceParams.simple(1),
-                                       Tables.none(),
-                                       Views.none(),
-                                       Types.of(udt1, udt2, udt3),
-                                       Functions.none());
-    }
-
-    private static KeyspaceMetadata keyspaceWithUDFs()
-    {
-        String keyspace = KEYSPACE_PREFIX + "UDFs";
-
-        UDFunction udf1 = UDFunction.create(new FunctionName(keyspace, "udf"),
-                                            ImmutableList.of(new ColumnIdentifier("col1", false), new ColumnIdentifier("col2", false)),
-                                            ImmutableList.of(BytesType.instance, Int32Type.instance),
-                                            LongType.instance,
-                                            false,
-                                            "java",
-                                            "return 42L;");
-
-        // an overload with the same name, not a typo
-        UDFunction udf2 = UDFunction.create(new FunctionName(keyspace, "udf"),
-                                            ImmutableList.of(new ColumnIdentifier("col3", false), new ColumnIdentifier("col4", false)),
-                                            ImmutableList.of(AsciiType.instance, LongType.instance),
-                                            Int32Type.instance,
-                                            true,
-                                            "java",
-                                            "return 42;");
-
-        UDFunction udf3 = UDFunction.create(new FunctionName(keyspace, "udf3"),
-                                            ImmutableList.of(new ColumnIdentifier("col4", false)),
-                                            ImmutableList.of(UTF8Type.instance),
-                                            BooleanType.instance,
-                                            false,
-                                            "java",
-                                            "return true;");
-
-        return KeyspaceMetadata.create(keyspace,
-                                       KeyspaceParams.simple(1),
-                                       Tables.none(),
-                                       Views.none(),
-                                       Types.none(),
-                                       Functions.of(udf1, udf2, udf3));
-    }
-
-    private static KeyspaceMetadata keyspaceWithUDAs()
-    {
-        String keyspace = KEYSPACE_PREFIX + "UDAs";
-
-        UDFunction udf1 = UDFunction.create(new FunctionName(keyspace, "udf1"),
-                                            ImmutableList.of(new ColumnIdentifier("col1", false), new ColumnIdentifier("col2", false)),
-                                            ImmutableList.of(Int32Type.instance, Int32Type.instance),
-                                            Int32Type.instance,
-                                            false,
-                                            "java",
-                                            "return 42;");
-
-        UDFunction udf2 = UDFunction.create(new FunctionName(keyspace, "udf2"),
-                                            ImmutableList.of(new ColumnIdentifier("col1", false), new ColumnIdentifier("col2", false)),
-                                            ImmutableList.of(LongType.instance, Int32Type.instance),
-                                            LongType.instance,
-                                            false,
-                                            "java",
-                                            "return 42L;");
-
-        UDFunction udf3 = UDFunction.create(new FunctionName(keyspace, "udf3"),
-                                            ImmutableList.of(new ColumnIdentifier("col1", false)),
-                                            ImmutableList.of(LongType.instance),
-                                            DoubleType.instance,
-                                            false,
-                                            "java",
-                                            "return 42d;");
-
-        Functions udfs = Functions.builder().add(udf1).add(udf2).add(udf3).build();
-
-        UDAggregate uda1 = UDAggregate.create(udfs, new FunctionName(keyspace, "uda1"),
-                                              ImmutableList.of(udf1.argTypes().get(1)),
-                                              udf1.returnType(),
-                                              udf1.name(),
-                                              null,
-                                              udf1.argTypes().get(0),
-                                              null
-        );
-
-        UDAggregate uda2 = UDAggregate.create(udfs, new FunctionName(keyspace, "uda2"),
-                                              ImmutableList.of(udf2.argTypes().get(1)),
-                                              udf3.returnType(),
-                                              udf2.name(),
-                                              udf3.name(),
-                                              udf2.argTypes().get(0),
-                                              LongType.instance.decompose(0L)
-        );
-
-        return KeyspaceMetadata.create(keyspace,
-                                       KeyspaceParams.simple(1),
-                                       Tables.none(),
-                                       Views.none(),
-                                       Types.none(),
-                                       Functions.of(udf1, udf2, udf3, uda1, uda2));
-    }
-
-    private static KeyspaceMetadata keyspaceWithUDFsAndUDTs()
-    {
-        String keyspace = KEYSPACE_PREFIX + "UDFUDTs";
-
-        UserType udt1 = new UserType(keyspace,
-                                     bytes("udt1"),
-                                     new ArrayList<FieldIdentifier>() {{ add(field("col1")); add(field("col2")); }},
-                                     new ArrayList<AbstractType<?>>() {{ add(UTF8Type.instance); add(Int32Type.instance); }},
-                                     true);
-
-        UserType udt2 = new UserType(keyspace,
-                                     bytes("udt2"),
-                                     new ArrayList<FieldIdentifier>() {{ add(field("col1")); add(field("col2")); }},
-                                     new ArrayList<AbstractType<?>>() {{ add(ListType.getInstance(udt1, false)); add(Int32Type.instance); }},
-                                     true);
-
-        UDFunction udf1 = UDFunction.create(new FunctionName(keyspace, "udf"),
-                                            ImmutableList.of(new ColumnIdentifier("col1", false), new ColumnIdentifier("col2", false)),
-                                            ImmutableList.of(udt1, udt2),
-                                            LongType.instance,
-                                            false,
-                                            "java",
-                                            "return 42L;");
-
-        // an overload with the same name, not a typo
-        UDFunction udf2 = UDFunction.create(new FunctionName(keyspace, "udf"),
-                                            ImmutableList.of(new ColumnIdentifier("col3", false), new ColumnIdentifier("col4", false)),
-                                            ImmutableList.of(AsciiType.instance, LongType.instance),
-                                            Int32Type.instance,
-                                            true,
-                                            "java",
-                                            "return 42;");
-
-        UDFunction udf3 = UDFunction.create(new FunctionName(keyspace, "udf3"),
-                                            ImmutableList.of(new ColumnIdentifier("col4", false)),
-                                            ImmutableList.of(new TupleType(Arrays.asList(udt1, udt2))),
-                                            BooleanType.instance,
-                                            false,
-                                            "java",
-                                            "return true;");
-
-        return KeyspaceMetadata.create(keyspace,
-                                       KeyspaceParams.simple(1),
-                                       Tables.none(),
-                                       Views.none(),
-                                       Types.of(udt1, udt2),
-                                       Functions.of(udf1, udf2, udf3));
-    }
-
-    private static KeyspaceMetadata keyspaceWithUDAsAndUDTs()
-    {
-        String keyspace = KEYSPACE_PREFIX + "UDAUDTs";
-
-        UserType udt1 = new UserType(keyspace,
-                                     bytes("udt1"),
-                                     new ArrayList<FieldIdentifier>() {{ add(field("col1")); add(field("col2")); }},
-                                     new ArrayList<AbstractType<?>>() {{ add(UTF8Type.instance); add(Int32Type.instance); }},
-                                     true);
-
-        UserType udt2 = new UserType(keyspace,
-                                     bytes("udt2"),
-                                     new ArrayList<FieldIdentifier>() {{ add(field("col1")); add(field("col2")); }},
-                                     new ArrayList<AbstractType<?>>() {{ add(ListType.getInstance(udt1, false)); add(Int32Type.instance); }},
-                                     true);
-
-        UDFunction udf1 = UDFunction.create(new FunctionName(keyspace, "udf1"),
-                                            ImmutableList.of(new ColumnIdentifier("col1", false), new ColumnIdentifier("col2", false)),
-                                            ImmutableList.of(udt1, udt2),
-                                            udt1,
-                                            false,
-                                            "java",
-                                            "return null;");
-
-        UDFunction udf2 = UDFunction.create(new FunctionName(keyspace, "udf2"),
-                                            ImmutableList.of(new ColumnIdentifier("col1", false), new ColumnIdentifier("col2", false)),
-                                            ImmutableList.of(udt2, udt1),
-                                            udt2,
-                                            false,
-                                            "java",
-                                            "return null;");
-
-        UDFunction udf3 = UDFunction.create(new FunctionName(keyspace, "udf3"),
-                                            ImmutableList.of(new ColumnIdentifier("col1", false)),
-                                            ImmutableList.of(udt2),
-                                            DoubleType.instance,
-                                            false,
-                                            "java",
-                                            "return 42d;");
-
-        Functions udfs = Functions.builder().add(udf1).add(udf2).add(udf3).build();
-
-        UDAggregate uda1 = UDAggregate.create(udfs, new FunctionName(keyspace, "uda1"),
-                                              ImmutableList.of(udf1.argTypes().get(1)),
-                                              udf1.returnType(),
-                                              udf1.name(),
-                                              null,
-                                              udf1.argTypes().get(0),
-                                              null
-        );
-
-        ByteBuffer twoNullEntries = ByteBuffer.allocate(8);
-        twoNullEntries.putInt(-1);
-        twoNullEntries.putInt(-1);
-        twoNullEntries.flip();
-        UDAggregate uda2 = UDAggregate.create(udfs, new FunctionName(keyspace, "uda2"),
-                                              ImmutableList.of(udf2.argTypes().get(1)),
-                                              udf3.returnType(),
-                                              udf2.name(),
-                                              udf3.name(),
-                                              udf2.argTypes().get(0),
-                                              twoNullEntries
-        );
-
-        return KeyspaceMetadata.create(keyspace,
-                                       KeyspaceParams.simple(1),
-                                       Tables.none(),
-                                       Views.none(),
-                                       Types.of(udt1, udt2),
-                                       Functions.of(udf1, udf2, udf3, uda1, uda2));
-    }
-
-    /*
-     * Serializing keyspaces
-     */
-
-    private static void legacySerializeKeyspace(KeyspaceMetadata keyspace)
-    {
-        makeLegacyCreateKeyspaceMutation(keyspace, TIMESTAMP).apply();
-        setLegacyIndexStatus(keyspace);
-    }
-
-    private static DecoratedKey decorate(CFMetaData metadata, Object value)
-    {
-        return metadata.decorateKey(((AbstractType)metadata.getKeyValidator()).decompose(value));
-    }
-
-    private static Mutation makeLegacyCreateKeyspaceMutation(KeyspaceMetadata keyspace, long timestamp)
-    {
-        Mutation.SimpleBuilder builder = Mutation.simpleBuilder(SchemaConstants.SYSTEM_KEYSPACE_NAME, decorate(SystemKeyspace.LegacyKeyspaces, keyspace.name))
-                                                 .timestamp(timestamp);
-
-        builder.update(SystemKeyspace.LegacyKeyspaces)
-               .row()
-               .add("durable_writes", keyspace.params.durableWrites)
-               .add("strategy_class", keyspace.params.replication.klass.getName())
-               .add("strategy_options", json(keyspace.params.replication.options));
-
-        keyspace.tables.forEach(table -> addTableToSchemaMutation(table, true, builder));
-        keyspace.types.forEach(type -> addTypeToSchemaMutation(type, builder));
-        keyspace.functions.udfs().forEach(udf -> addFunctionToSchemaMutation(udf, builder));
-        keyspace.functions.udas().forEach(uda -> addAggregateToSchemaMutation(uda, builder));
-
-        return builder.build();
-    }
-
-    /*
-     * Serializing tables
-     */
-
-    private static void addTableToSchemaMutation(CFMetaData table, boolean withColumnsAndTriggers, Mutation.SimpleBuilder builder)
-    {
-        // For properties that can be null (and can be changed), we insert tombstones to make sure
-        // we don't keep a property the user has removed
-        Row.SimpleBuilder adder = builder.update(SystemKeyspace.LegacyColumnfamilies)
-                                         .row(table.cfName);
-
-        adder.add("cf_id", table.cfId)
-             .add("type", table.isSuper() ? "Super" : "Standard");
-
-        if (table.isSuper())
-        {
-            adder.add("comparator", table.comparator.subtype(0).toString())
-                 .add("subcomparator", ((MapType)table.compactValueColumn().type).getKeysType().toString());
-        }
-        else
-        {
-            adder.add("comparator", LegacyLayout.makeLegacyComparator(table).toString());
-        }
-
-        adder.add("bloom_filter_fp_chance", table.params.bloomFilterFpChance)
-             .add("caching", cachingToString(table.params.caching))
-             .add("comment", table.params.comment)
-             .add("compaction_strategy_class", table.params.compaction.klass().getName())
-             .add("compaction_strategy_options", json(table.params.compaction.options()))
-             .add("compression_parameters", json(ThriftConversion.compressionParametersToThrift(table.params.compression)))
-             .add("default_time_to_live", table.params.defaultTimeToLive)
-             .add("gc_grace_seconds", table.params.gcGraceSeconds)
-             .add("key_validator", table.getKeyValidator().toString())
-             .add("local_read_repair_chance", table.params.dcLocalReadRepairChance)
-             .add("max_compaction_threshold", table.params.compaction.maxCompactionThreshold())
-             .add("max_index_interval", table.params.maxIndexInterval)
-             .add("memtable_flush_period_in_ms", table.params.memtableFlushPeriodInMs)
-             .add("min_compaction_threshold", table.params.compaction.minCompactionThreshold())
-             .add("min_index_interval", table.params.minIndexInterval)
-             .add("read_repair_chance", table.params.readRepairChance)
-             .add("speculative_retry", table.params.speculativeRetry.toString());
-
-        Map<String, Long> dropped = new HashMap<>();
-        for (Map.Entry<ByteBuffer, CFMetaData.DroppedColumn> entry : table.getDroppedColumns().entrySet())
-        {
-            String name = UTF8Type.instance.getString(entry.getKey());
-            CFMetaData.DroppedColumn column = entry.getValue();
-            dropped.put(name, column.droppedTime);
-        }
-        adder.add("dropped_columns", dropped);
-
-        adder.add("is_dense", table.isDense());
-
-        adder.add("default_validator", table.makeLegacyDefaultValidator().toString());
-
-        if (withColumnsAndTriggers)
-        {
-            for (ColumnDefinition column : table.allColumns())
-                addColumnToSchemaMutation(table, column, builder);
-
-            for (TriggerMetadata trigger : table.getTriggers())
-                addTriggerToSchemaMutation(table, trigger, builder);
-        }
-    }
-
-    private static String cachingToString(CachingParams caching)
-    {
-        return format("{\"keys\":\"%s\", \"rows_per_partition\":\"%s\"}",
-                      caching.keysAsString(),
-                      caching.rowsPerPartitionAsString());
-    }
-
-    private static void addColumnToSchemaMutation(CFMetaData table, ColumnDefinition column, Mutation.SimpleBuilder builder)
-    {
-        // We need to special case pk-only dense tables. See CASSANDRA-9874.
-        String name = table.isDense() && column.kind == ColumnDefinition.Kind.REGULAR && column.type instanceof EmptyType
-                    ? ""
-                    : column.name.toString();
-
-        final Row.SimpleBuilder adder = builder.update(SystemKeyspace.LegacyColumns).row(table.cfName, name);
-
-        adder.add("validator", column.type.toString())
-             .add("type", serializeKind(column.kind, table.isDense()))
-             .add("component_index", column.position());
-
-        Optional<IndexMetadata> index = findIndexForColumn(table.getIndexes(), table, column);
-        if (index.isPresent())
-        {
-            IndexMetadata i = index.get();
-            adder.add("index_name", i.name);
-            adder.add("index_type", i.kind.toString());
-            adder.add("index_options", json(i.options));
-        }
-        else
-        {
-            adder.add("index_name", null);
-            adder.add("index_type", null);
-            adder.add("index_options", null);
-        }
-    }
-
-    private static Optional<IndexMetadata> findIndexForColumn(Indexes indexes,
-                                                              CFMetaData table,
-                                                              ColumnDefinition column)
-    {
-        // makes the assumption that the string option denoting the
-        // index targets can be parsed by CassandraIndex.parseTarget,
-        // which should be true for any pre-3.0 index
-        for (IndexMetadata index : indexes)
-            if (TargetParser.parse(table, index).left.equals(column))
-                return Optional.of(index);
-
-        return Optional.empty();
-    }
-
-    private static String serializeKind(ColumnDefinition.Kind kind, boolean isDense)
-    {
-        // For backward compatibility, we special case CLUSTERING and the case where the table is dense.
-        if (kind == ColumnDefinition.Kind.CLUSTERING)
-            return "clustering_key";
-
-        if (kind == ColumnDefinition.Kind.REGULAR && isDense)
-            return "compact_value";
-
-        return kind.toString().toLowerCase();
-    }
-
-    private static void addTriggerToSchemaMutation(CFMetaData table, TriggerMetadata trigger, Mutation.SimpleBuilder builder)
-    {
-        builder.update(SystemKeyspace.LegacyTriggers)
-               .row(table.cfName, trigger.name)
-               .add("trigger_options", Collections.singletonMap("class", trigger.classOption));
-    }
-
-    /*
-     * Serializing types
-     */
-
-    private static void addTypeToSchemaMutation(UserType type, Mutation.SimpleBuilder builder)
-    {
-        Row.SimpleBuilder adder = builder.update(SystemKeyspace.LegacyUsertypes)
-                                         .row(type.getNameAsString());
-
-        List<String> names = new ArrayList<>();
-        List<String> types = new ArrayList<>();
-        for (int i = 0; i < type.size(); i++)
-        {
-            names.add(type.fieldName(i).toString());
-            types.add(type.fieldType(i).toString());
-        }
-
-        adder.add("field_names", names)
-             .add("field_types", types);
-    }
-
-    /*
-     * Serializing functions
-     */
-
-    private static void addFunctionToSchemaMutation(UDFunction function, Mutation.SimpleBuilder builder)
-    {
-        Row.SimpleBuilder adder = builder.update(SystemKeyspace.LegacyFunctions)
-                                         .row(function.name().name, functionSignatureWithTypes(function));
-
-        adder.add("body", function.body())
-             .add("language", function.language())
-             .add("return_type", function.returnType().toString())
-             .add("called_on_null_input", function.isCalledOnNullInput());
-
-        List<ByteBuffer> names = new ArrayList<>();
-        List<String> types = new ArrayList<>();
-        for (int i = 0; i < function.argNames().size(); i++)
-        {
-            names.add(function.argNames().get(i).bytes);
-            types.add(function.argTypes().get(i).toString());
-        }
-        adder.add("argument_names", names)
-             .add("argument_types", types);
-    }
-
-    /*
-     * Serializing aggregates
-     */
-
-    private static void addAggregateToSchemaMutation(UDAggregate aggregate, Mutation.SimpleBuilder builder)
-    {
-        Row.SimpleBuilder adder = builder.update(SystemKeyspace.LegacyAggregates)
-                                         .row(aggregate.name().name, functionSignatureWithTypes(aggregate));
-
-        adder.add("return_type", aggregate.returnType().toString())
-             .add("state_func", aggregate.stateFunction().name().name);
-
-        if (aggregate.stateType() != null)
-            adder.add("state_type", aggregate.stateType().toString());
-        if (aggregate.finalFunction() != null)
-            adder.add("final_func", aggregate.finalFunction().name().name);
-        if (aggregate.initialCondition() != null)
-            adder.add("initcond", aggregate.initialCondition());
-
-        List<String> types = new ArrayList<>();
-        for (AbstractType<?> argType : aggregate.argTypes())
-            types.add(argType.toString());
-
-        adder.add("argument_types", types);
-    }
-
-    // We allow method overloads, so a function is not uniquely identified by its name only, but
-    // also by its argument types. To distinguish overloads of a given function name in the schema
-    // we use a "signature", which is just a list of its CQL argument types.
-    public static ByteBuffer functionSignatureWithTypes(AbstractFunction fun)
-    {
-        List<String> arguments =
-            fun.argTypes()
-               .stream()
-               .map(argType -> argType.asCQL3Type().toString())
-               .collect(Collectors.toList());
-
-        return ListType.getInstance(UTF8Type.instance, false).decompose(arguments);
-    }
-
-    private static void setLegacyIndexStatus(KeyspaceMetadata keyspace)
-    {
-        keyspace.tables.forEach(LegacySchemaMigratorTest::setLegacyIndexStatus);
-    }
-
-    private static void setLegacyIndexStatus(CFMetaData table)
-    {
-        table.getIndexes().forEach((index) -> setLegacyIndexStatus(table.ksName, table.cfName, index));
-    }
-
-    private static void setLegacyIndexStatus(String keyspace, String table, IndexMetadata index)
-    {
-        SystemKeyspace.setIndexBuilt(keyspace, table + '.' + index.name);
-    }
-
-    private static void verifyIndexBuildStatus(KeyspaceMetadata keyspace)
-    {
-        keyspace.tables.forEach(LegacySchemaMigratorTest::verifyIndexBuildStatus);
-    }
-
-    private static void verifyIndexBuildStatus(CFMetaData table)
-    {
-        table.getIndexes().forEach(index -> verifyIndexBuildStatus(table.ksName, table.cfName, index));
-    }
-
-    private static void verifyIndexBuildStatus(String keyspace, String table, IndexMetadata index)
-    {
-        assertFalse(SystemKeyspace.isIndexBuilt(keyspace, table + '.' + index.name));
-        assertTrue(SystemKeyspace.isIndexBuilt(keyspace, index.name));
-    }
-
-}
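
The functionSignatureWithTypes helper in the deleted test above is the crux of
overload handling in the legacy schema: a name alone cannot identify a function,
so the row key pairs the name with the serialized list of CQL argument type
strings. A minimal standalone sketch of the same idea, using plain Java strings
in place of Cassandra's AbstractType hierarchy (the joined form below is
illustrative only; the test serializes the list via ListType, not as a string):

    import java.util.Arrays;
    import java.util.List;
    import java.util.stream.Collectors;

    public class SignatureSketch
    {
        // Name plus the ordered CQL argument type names identifies an overload.
        static String signature(String name, List<String> cqlArgTypes)
        {
            return name + cqlArgTypes.stream().collect(Collectors.joining(",", "(", ")"));
        }

        public static void main(String[] args)
        {
            // The two "udf" overloads from keyspacesToMigrate() collide on name...
            System.out.println(signature("udf", Arrays.asList("blob", "int")));     // udf(blob,int)
            // ...but not on signature.
            System.out.println(signature("udf", Arrays.asList("ascii", "bigint"))); // udf(ascii,bigint)
        }
    }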

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/service/SerializationsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/service/SerializationsTest.java b/test/unit/org/apache/cassandra/service/SerializationsTest.java
index 4df112a..4f3c80e 100644
--- a/test/unit/org/apache/cassandra/service/SerializationsTest.java
+++ b/test/unit/org/apache/cassandra/service/SerializationsTest.java
@@ -61,7 +61,7 @@ public class SerializationsTest extends AbstractSerializationsTester
         partitionerSwitcher = Util.switchPartitioner(RandomPartitioner.instance);
         RANDOM_UUID = UUID.fromString("b5c3d033-75aa-4c2f-a819-947aac7a0c54");
         FULL_RANGE = new Range<>(Util.testPartitioner().getMinimumToken(), Util.testPartitioner().getMinimumToken());
-        DESC = new RepairJobDesc(getVersion() < MessagingService.VERSION_21 ? null : RANDOM_UUID, RANDOM_UUID, "Keyspace1", "Standard1", Arrays.asList(FULL_RANGE));
+        DESC = new RepairJobDesc(RANDOM_UUID, RANDOM_UUID, "Keyspace1", "Standard1", Arrays.asList(FULL_RANGE));
     }
 
     @AfterClass

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/streaming/compression/CompressedInputStreamTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/streaming/compression/CompressedInputStreamTest.java b/test/unit/org/apache/cassandra/streaming/compression/CompressedInputStreamTest.java
index c0fc277..f3d0b52 100644
--- a/test/unit/org/apache/cassandra/streaming/compression/CompressedInputStreamTest.java
+++ b/test/unit/org/apache/cassandra/streaming/compression/CompressedInputStreamTest.java
@@ -85,8 +85,9 @@ public class CompressedInputStreamTest
         assert valuesToCheck != null && valuesToCheck.length > 0;
 
         // write compressed data file of longs
-        File tmp = new File(File.createTempFile("cassandra", "unittest").getParent(), "ks-cf-ib-1-Data.db");
-        Descriptor desc = Descriptor.fromFilename(tmp.getAbsolutePath());
+        File parentDir = new File(System.getProperty("java.io.tmpdir"));
+        Descriptor desc = new Descriptor(parentDir, "ks", "cf", 1);
+        File tmp = new File(desc.filenameFor(Component.DATA));
         MetadataCollector collector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
         CompressionParams param = CompressionParams.snappy(32);
         Map<Long, Long> index = new HashMap<Long, Long>();
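
The CompressedInputStreamTest change above sidesteps legacy filename parsing:
instead of synthesizing a "ks-cf-ib-1-Data.db" name and round-tripping it
through Descriptor.fromFilename, the test now builds the Descriptor from its
parts and asks it for the component path. A rough sketch of the new pattern,
assuming the Cassandra test classpath:

    // Construct the descriptor from directory, keyspace, table, and generation,
    // then derive the on-disk data file path from it -- no filename parsing.
    File parentDir = new File(System.getProperty("java.io.tmpdir"));
    Descriptor desc = new Descriptor(parentDir, "ks", "cf", 1);
    File dataFile = new File(desc.filenameFor(Component.DATA));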

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/utils/BitSetTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/utils/BitSetTest.java b/test/unit/org/apache/cassandra/utils/BitSetTest.java
index 0f51531..4dab17e 100644
--- a/test/unit/org/apache/cassandra/utils/BitSetTest.java
+++ b/test/unit/org/apache/cassandra/utils/BitSetTest.java
@@ -44,13 +44,8 @@ public class BitSetTest
     @Test
     public void compareBitSets()
     {
-        compareBitSets(false);
-        compareBitSets(true);
-    }
-    private static void compareBitSets(boolean oldBfHashOrder)
-    {
-        BloomFilter bf2 = (BloomFilter) FilterFactory.getFilter(FilterTestHelper.ELEMENTS / 2, FilterTestHelper.MAX_FAILURE_RATE, false, oldBfHashOrder);
-        BloomFilter bf3 = (BloomFilter) FilterFactory.getFilter(FilterTestHelper.ELEMENTS / 2, FilterTestHelper.MAX_FAILURE_RATE, true, oldBfHashOrder);
+        BloomFilter bf2 = (BloomFilter) FilterFactory.getFilter(FilterTestHelper.ELEMENTS / 2, FilterTestHelper.MAX_FAILURE_RATE, false);
+        BloomFilter bf3 = (BloomFilter) FilterFactory.getFilter(FilterTestHelper.ELEMENTS / 2, FilterTestHelper.MAX_FAILURE_RATE, true);
 
         RandomStringGenerator gen1 = new KeyGenerator.RandomStringGenerator(new Random().nextInt(), FilterTestHelper.ELEMENTS);
 

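The BitSetTest hunk above shows the pattern that repeats through the filter
tests below: FilterFactory.getFilter drops its trailing oldBfHashOrder flag,
leaving (elements, max false-positive rate, offheap), now that only the 3.0+
hash order remains. A sketch of the surviving form, assuming the Cassandra test
classpath (IFilter is closeable, so try-with-resources applies):

    // elements, max false-positive rate, offheap -- no oldBfHashOrder flag.
    try (IFilter bf = FilterFactory.getFilter(10000L, 0.01d, true))
    {
        bf.add(FilterTestHelper.bytes("a"));
        assert bf.isPresent(FilterTestHelper.bytes("a"));
    }
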
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/utils/BloomFilterTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/utils/BloomFilterTest.java b/test/unit/org/apache/cassandra/utils/BloomFilterTest.java
index 818af9c..07cbc5a 100644
--- a/test/unit/org/apache/cassandra/utils/BloomFilterTest.java
+++ b/test/unit/org/apache/cassandra/utils/BloomFilterTest.java
@@ -38,7 +38,6 @@ import org.apache.cassandra.utils.KeyGenerator.RandomStringGenerator;
 
 public class BloomFilterTest
 {
-    public IFilter bfOldFormat;
     public IFilter bfInvHashes;
 
     public BloomFilterTest()
@@ -46,14 +45,14 @@ public class BloomFilterTest
 
     }
 
-    public static IFilter testSerialize(IFilter f, boolean oldBfHashOrder) throws IOException
+    public static IFilter testSerialize(IFilter f) throws IOException
     {
         f.add(FilterTestHelper.bytes("a"));
         DataOutputBuffer out = new DataOutputBuffer();
         FilterFactory.serialize(f, out);
 
         ByteArrayInputStream in = new ByteArrayInputStream(out.getData(), 0, out.getLength());
-        IFilter f2 = FilterFactory.deserialize(new DataInputStream(in), true, oldBfHashOrder);
+        IFilter f2 = FilterFactory.deserialize(new DataInputStream(in), true);
 
         assert f2.isPresent(FilterTestHelper.bytes("a"));
         assert !f2.isPresent(FilterTestHelper.bytes("b"));
@@ -64,14 +63,12 @@ public class BloomFilterTest
     @Before
     public void setup()
     {
-        bfOldFormat = FilterFactory.getFilter(10000L, FilterTestHelper.MAX_FAILURE_RATE, true, true);
-        bfInvHashes = FilterFactory.getFilter(10000L, FilterTestHelper.MAX_FAILURE_RATE, true, false);
+        bfInvHashes = FilterFactory.getFilter(10000L, FilterTestHelper.MAX_FAILURE_RATE, true);
     }
 
     @After
     public void destroy()
     {
-        bfOldFormat.close();
         bfInvHashes.close();
     }
 
@@ -91,10 +88,6 @@ public class BloomFilterTest
     @Test
     public void testOne()
     {
-        bfOldFormat.add(FilterTestHelper.bytes("a"));
-        assert bfOldFormat.isPresent(FilterTestHelper.bytes("a"));
-        assert !bfOldFormat.isPresent(FilterTestHelper.bytes("b"));
-
         bfInvHashes.add(FilterTestHelper.bytes("a"));
         assert bfInvHashes.isPresent(FilterTestHelper.bytes("a"));
         assert !bfInvHashes.isPresent(FilterTestHelper.bytes("b"));
@@ -103,16 +96,12 @@ public class BloomFilterTest
     @Test
     public void testFalsePositivesInt()
     {
-        FilterTestHelper.testFalsePositives(bfOldFormat, FilterTestHelper.intKeys(), FilterTestHelper.randomKeys2());
-
         FilterTestHelper.testFalsePositives(bfInvHashes, FilterTestHelper.intKeys(), FilterTestHelper.randomKeys2());
     }
 
     @Test
     public void testFalsePositivesRandom()
     {
-        FilterTestHelper.testFalsePositives(bfOldFormat, FilterTestHelper.randomKeys(), FilterTestHelper.randomKeys2());
-
         FilterTestHelper.testFalsePositives(bfInvHashes, FilterTestHelper.randomKeys(), FilterTestHelper.randomKeys2());
     }
 
@@ -123,39 +112,28 @@ public class BloomFilterTest
         {
             return;
         }
-        IFilter bf2 = FilterFactory.getFilter(KeyGenerator.WordGenerator.WORDS / 2, FilterTestHelper.MAX_FAILURE_RATE, true, false);
+        IFilter bf2 = FilterFactory.getFilter(KeyGenerator.WordGenerator.WORDS / 2, FilterTestHelper.MAX_FAILURE_RATE, true);
         int skipEven = KeyGenerator.WordGenerator.WORDS % 2 == 0 ? 0 : 2;
         FilterTestHelper.testFalsePositives(bf2,
                                             new KeyGenerator.WordGenerator(skipEven, 2),
                                             new KeyGenerator.WordGenerator(1, 2));
         bf2.close();
-
-        // new, swapped hash values bloom filter
-        bf2 = FilterFactory.getFilter(KeyGenerator.WordGenerator.WORDS / 2, FilterTestHelper.MAX_FAILURE_RATE, true, true);
-        FilterTestHelper.testFalsePositives(bf2,
-                                            new KeyGenerator.WordGenerator(skipEven, 2),
-                                            new KeyGenerator.WordGenerator(1, 2));
-        bf2.close();
     }
 
     @Test
     public void testSerialize() throws IOException
     {
-        BloomFilterTest.testSerialize(bfOldFormat, true).close();
-
-        BloomFilterTest.testSerialize(bfInvHashes, false).close();
+        BloomFilterTest.testSerialize(bfInvHashes).close();
     }
 
     @Test
     @Ignore
     public void testManyRandom()
     {
-        testManyRandom(FilterTestHelper.randomKeys(), false);
-
-        testManyRandom(FilterTestHelper.randomKeys(), true);
+        testManyRandom(FilterTestHelper.randomKeys());
     }
 
-    private static void testManyRandom(Iterator<ByteBuffer> keys, boolean oldBfHashOrder)
+    private static void testManyRandom(Iterator<ByteBuffer> keys)
     {
         int MAX_HASH_COUNT = 128;
         Set<Long> hashes = new HashSet<>();
@@ -164,7 +142,7 @@ public class BloomFilterTest
         {
             hashes.clear();
             FilterKey buf = FilterTestHelper.wrap(keys.next());
-            BloomFilter bf = (BloomFilter) FilterFactory.getFilter(10, 1, false, oldBfHashOrder);
+            BloomFilter bf = (BloomFilter) FilterFactory.getFilter(10, 1, false);
             for (long hashIndex : bf.getHashBuckets(buf, MAX_HASH_COUNT, 1024 * 1024))
             {
                 hashes.add(hashIndex);
@@ -179,41 +157,15 @@ public class BloomFilterTest
     public void testOffHeapException()
     {
         long numKeys = ((long)Integer.MAX_VALUE) * 64L + 1L; // approx 128 Billion
-        FilterFactory.getFilter(numKeys, 0.01d, true, true).close();
+        FilterFactory.getFilter(numKeys, 0.01d, true).close();
     }
 
     @Test
-    public void compareCachedKeyOldHashOrder()
+    public void compareCachedKey()
     {
-        BloomFilter bf1 = (BloomFilter) FilterFactory.getFilter(FilterTestHelper.ELEMENTS / 2, FilterTestHelper.MAX_FAILURE_RATE, false, true);
-        BloomFilter bf2 = (BloomFilter) FilterFactory.getFilter(FilterTestHelper.ELEMENTS / 2, FilterTestHelper.MAX_FAILURE_RATE, false, true);
-        BloomFilter bf3 = (BloomFilter) FilterFactory.getFilter(FilterTestHelper.ELEMENTS / 2, FilterTestHelper.MAX_FAILURE_RATE, false, true);
-
-        RandomStringGenerator gen1 = new KeyGenerator.RandomStringGenerator(new Random().nextInt(), FilterTestHelper.ELEMENTS);
-
-        // make sure all bitsets are empty.
-        BitSetTest.compare(bf1.bitset, bf2.bitset);
-        BitSetTest.compare(bf1.bitset, bf3.bitset);
-
-        while (gen1.hasNext())
-        {
-            ByteBuffer key = gen1.next();
-            FilterKey cached = FilterTestHelper.wrapCached(key);
-            bf1.add(FilterTestHelper.wrap(key));
-            bf2.add(cached);
-            bf3.add(cached);
-        }
-
-        BitSetTest.compare(bf1.bitset, bf2.bitset);
-        BitSetTest.compare(bf1.bitset, bf3.bitset);
-    }
-
-    @Test
-    public void compareCachedKeyNewHashOrder()
-    {
-        try (BloomFilter bf1 = (BloomFilter) FilterFactory.getFilter(FilterTestHelper.ELEMENTS / 2, FilterTestHelper.MAX_FAILURE_RATE, false, false);
-             BloomFilter bf2 = (BloomFilter) FilterFactory.getFilter(FilterTestHelper.ELEMENTS / 2, FilterTestHelper.MAX_FAILURE_RATE, false, false);
-             BloomFilter bf3 = (BloomFilter) FilterFactory.getFilter(FilterTestHelper.ELEMENTS / 2, FilterTestHelper.MAX_FAILURE_RATE, false, false))
+        try (BloomFilter bf1 = (BloomFilter) FilterFactory.getFilter(FilterTestHelper.ELEMENTS / 2, FilterTestHelper.MAX_FAILURE_RATE, false);
+             BloomFilter bf2 = (BloomFilter) FilterFactory.getFilter(FilterTestHelper.ELEMENTS / 2, FilterTestHelper.MAX_FAILURE_RATE, false);
+             BloomFilter bf3 = (BloomFilter) FilterFactory.getFilter(FilterTestHelper.ELEMENTS / 2, FilterTestHelper.MAX_FAILURE_RATE, false))
         {
             RandomStringGenerator gen1 = new KeyGenerator.RandomStringGenerator(new Random().nextInt(), FilterTestHelper.ELEMENTS);
 
@@ -239,16 +191,10 @@ public class BloomFilterTest
     @Ignore
     public void testHugeBFSerialization() throws IOException
     {
-        hugeBFSerialization(false);
-        hugeBFSerialization(true);
-    }
-
-    static void hugeBFSerialization(boolean oldBfHashOrder) throws IOException
-    {
         ByteBuffer test = ByteBuffer.wrap(new byte[] {0, 1});
 
         File file = FileUtils.createTempFile("bloomFilterTest-", ".dat");
-        BloomFilter filter = (BloomFilter) FilterFactory.getFilter(((long) Integer.MAX_VALUE / 8) + 1, 0.01d, true, oldBfHashOrder);
+        BloomFilter filter = (BloomFilter) FilterFactory.getFilter(((long) Integer.MAX_VALUE / 8) + 1, 0.01d, true);
         filter.add(FilterTestHelper.wrap(test));
         DataOutputStreamPlus out = new BufferedDataOutputStreamPlus(new FileOutputStream(file));
         FilterFactory.serialize(filter, out);
@@ -257,7 +203,7 @@ public class BloomFilterTest
         filter.close();
 
         DataInputStream in = new DataInputStream(new FileInputStream(file));
-        BloomFilter filter2 = (BloomFilter) FilterFactory.deserialize(in, true, oldBfHashOrder);
+        BloomFilter filter2 = (BloomFilter) FilterFactory.deserialize(in, true);
         Assert.assertTrue(filter2.isPresent(FilterTestHelper.wrap(test)));
         FileUtils.closeQuietly(in);
         filter2.close();

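For reference, the post-patch FilterFactory surface, pieced together from the hunks above (a minimal sketch; variable names are illustrative, and the oldBfHashOrder flag is gone from every call):

    // create: number of elements, target false-positive rate, on/off heap
    try (IFilter bf = FilterFactory.getFilter(1000000, 0.01d, false))
    {
        bf.add(key);                          // key is a FilterKey
        FilterFactory.serialize(bf, out);     // out is a DataOutputStreamPlus
    }

    // read back: only the offheap flag remains, no hash-order argument
    try (IFilter bf2 = FilterFactory.deserialize(in, false))
    {
        boolean maybePresent = bf2.isPresent(key);
    }
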
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/utils/SerializationsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/utils/SerializationsTest.java b/test/unit/org/apache/cassandra/utils/SerializationsTest.java
index ac5a6a7..7973964 100644
--- a/test/unit/org/apache/cassandra/utils/SerializationsTest.java
+++ b/test/unit/org/apache/cassandra/utils/SerializationsTest.java
@@ -46,27 +46,27 @@ public class SerializationsTest extends AbstractSerializationsTester
         DatabaseDescriptor.daemonInitialization();
     }
 
-    private static void testBloomFilterWrite(boolean offheap, boolean oldBfHashOrder) throws IOException
+    private static void testBloomFilterWrite(boolean offheap) throws IOException
     {
         IPartitioner partitioner = Util.testPartitioner();
-        try (IFilter bf = FilterFactory.getFilter(1000000, 0.0001, offheap, oldBfHashOrder))
+        try (IFilter bf = FilterFactory.getFilter(1000000, 0.0001, offheap))
         {
             for (int i = 0; i < 100; i++)
                 bf.add(partitioner.decorateKey(partitioner.getTokenFactory().toByteArray(partitioner.getRandomToken())));
-            try (DataOutputStreamPlus out = getOutput(oldBfHashOrder ? "2.1" : "3.0", "utils.BloomFilter.bin"))
+            try (DataOutputStreamPlus out = getOutput("3.0", "utils.BloomFilter.bin"))
             {
                 FilterFactory.serialize(bf, out);
             }
         }
     }
 
-    private static void testBloomFilterWrite1000(boolean offheap, boolean oldBfHashOrder) throws IOException
+    private static void testBloomFilterWrite1000(boolean offheap) throws IOException
     {
-        try (IFilter bf = FilterFactory.getFilter(1000000, 0.0001, offheap, oldBfHashOrder))
+        try (IFilter bf = FilterFactory.getFilter(1000000, 0.0001, offheap))
         {
             for (int i = 0; i < 1000; i++)
                 bf.add(Util.dk(Int32Type.instance.decompose(i)));
-            try (DataOutputStreamPlus out = getOutput(oldBfHashOrder ? "2.1" : "3.0", "utils.BloomFilter1000.bin"))
+            try (DataOutputStreamPlus out = getOutput("3.0", "utils.BloomFilter1000.bin"))
             {
                 FilterFactory.serialize(bf, out);
             }
@@ -77,29 +77,10 @@ public class SerializationsTest extends AbstractSerializationsTester
     public void testBloomFilterRead1000() throws IOException
     {
         if (EXECUTE_WRITES)
-        {
-            testBloomFilterWrite1000(true, false);
-            testBloomFilterWrite1000(true, true);
-        }
+            testBloomFilterWrite1000(true);
 
         try (DataInputStream in = getInput("3.0", "utils.BloomFilter1000.bin");
-             IFilter filter = FilterFactory.deserialize(in, true, false))
-        {
-            boolean present;
-            for (int i = 0 ; i < 1000 ; i++)
-            {
-                present = filter.isPresent(Util.dk(Int32Type.instance.decompose(i)));
-                Assert.assertTrue(present);
-            }
-            for (int i = 1000 ; i < 2000 ; i++)
-            {
-                present = filter.isPresent(Util.dk(Int32Type.instance.decompose(i)));
-                Assert.assertFalse(present);
-            }
-        }
-
-        try (DataInputStream in = getInput("2.1", "utils.BloomFilter1000.bin");
-             IFilter filter = FilterFactory.deserialize(in, true, true))
+             IFilter filter = FilterFactory.deserialize(in, true))
         {
             boolean present;
             for (int i = 0 ; i < 1000 ; i++)
@@ -113,44 +94,20 @@ public class SerializationsTest extends AbstractSerializationsTester
                 Assert.assertFalse(present);
             }
         }
-
-        // eh - reading version 'ka' (2.1) with 3.0 BloomFilter
-        int falsePositive = 0;
-        int falseNegative = 0;
-        try (DataInputStream in = getInput("2.1", "utils.BloomFilter1000.bin");
-             IFilter filter = FilterFactory.deserialize(in, true, false))
-        {
-            boolean present;
-            for (int i = 0 ; i < 1000 ; i++)
-            {
-                present = filter.isPresent(Util.dk(Int32Type.instance.decompose(i)));
-                if (!present)
-                    falseNegative ++;
-            }
-            for (int i = 1000 ; i < 2000 ; i++)
-            {
-                present = filter.isPresent(Util.dk(Int32Type.instance.decompose(i)));
-                if (present)
-                    falsePositive ++;
-            }
-        }
-        Assert.assertEquals(1000, falseNegative);
-        Assert.assertEquals(0, falsePositive);
     }
 
     @Test
     public void testBloomFilterTable() throws Exception
     {
-        testBloomFilterTable("test/data/bloom-filter/ka/foo/foo-atable-ka-1-Filter.db", true);
-        testBloomFilterTable("test/data/bloom-filter/la/foo/la-1-big-Filter.db", false);
+        testBloomFilterTable("test/data/bloom-filter/la/foo/la-1-big-Filter.db");
     }
 
-    private static void testBloomFilterTable(String file, boolean oldBfHashOrder) throws Exception
+    private static void testBloomFilterTable(String file) throws Exception
     {
         Murmur3Partitioner partitioner = new Murmur3Partitioner();
 
         try (DataInputStream in = new DataInputStream(new FileInputStream(new File(file)));
-             IFilter filter = FilterFactory.deserialize(in, true, oldBfHashOrder))
+             IFilter filter = FilterFactory.deserialize(in, true))
         {
             for (int i = 1; i <= 10; i++)
             {
@@ -173,31 +130,6 @@ public class SerializationsTest extends AbstractSerializationsTester
         }
     }
 
-    @Test
-    public void testBloomFilterReadMURMUR3() throws IOException
-    {
-        if (EXECUTE_WRITES)
-            testBloomFilterWrite(true, true);
-
-        try (DataInputStream in = getInput("3.0", "utils.BloomFilter.bin");
-             IFilter filter = FilterFactory.deserialize(in, true, true))
-        {
-            Assert.assertNotNull(filter);
-        }
-    }
-
-    @Test
-    public void testBloomFilterReadMURMUR3pre30() throws IOException
-    {
-        if (EXECUTE_WRITES)
-            testBloomFilterWrite(true, false);
-
-        try (DataInputStream in = getInput("2.1", "utils.BloomFilter.bin");
-             IFilter filter = FilterFactory.deserialize(in, true, false))
-        {
-            Assert.assertNotNull(filter);
-        }
-    }
 
     private static void testEstimatedHistogramWrite() throws IOException
     {


http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/db/rows/DigestBackwardCompatibilityTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/rows/DigestBackwardCompatibilityTest.java b/test/unit/org/apache/cassandra/db/rows/DigestBackwardCompatibilityTest.java
deleted file mode 100644
index a72d397..0000000
--- a/test/unit/org/apache/cassandra/db/rows/DigestBackwardCompatibilityTest.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.db.rows;
-
-import java.nio.ByteBuffer;
-import java.security.MessageDigest;
-
-import org.junit.Test;
-
-import org.apache.cassandra.Util;
-import org.apache.cassandra.config.CFMetaData;
-import org.apache.cassandra.config.ColumnDefinition;
-import org.apache.cassandra.cql3.CQLTester;
-import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.partitions.*;
-import org.apache.cassandra.db.context.CounterContext;
-import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.CounterId;
-import org.apache.cassandra.utils.FBUtilities;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * Test that digests for pre-3.0 versions are properly computed (they match the value computed on pre-3.0 nodes).
- *
- * The concrete 'hard-coded' digests this file tests against have been generated on a 2.2 node using basically
- * the same test file but with 2 modifications:
- *   1. readAndDigest is modified to work on 2.2 (the actual modification is in the method as a comment)
- *   2. the assertions are replaced by simple println() of the generated digest.
- *
- * Note that we only compare against 2.2 since digests should be fixed between versions before 3.0 (it would be a bug
- * in previous versions otherwise).
- */
-public class DigestBackwardCompatibilityTest extends CQLTester
-{
-    private ByteBuffer readAndDigest(String partitionKey)
-    {
-        /*
-         * In 2.2, this must be replaced by:
-         *   ColumnFamily partition = getCurrentColumnFamilyStore().getColumnFamily(QueryFilter.getIdentityFilter(Util.dk(partitionKey), currentTable(), System.currentTimeMillis()));
-         *   return ColumnFamily.digest(partition);
-         */
-
-        ReadCommand cmd = Util.cmd(getCurrentColumnFamilyStore(), partitionKey).build();
-        ImmutableBTreePartition partition = Util.getOnlyPartitionUnfiltered(cmd);
-        MessageDigest digest = FBUtilities.threadLocalMD5Digest();
-        UnfilteredRowIterators.digest(cmd, partition.unfilteredIterator(), digest, MessagingService.VERSION_22);
-        return ByteBuffer.wrap(digest.digest());
-    }
-
-    private void assertDigest(String expected, ByteBuffer actual)
-    {
-        String toTest = ByteBufferUtil.bytesToHex(actual);
-        assertEquals(String.format("[digest from 2.2] %s != %s [digest from 3.0]", expected, toTest), expected, toTest);
-    }
-
-    @Test
-    public void testCQLTable() throws Throwable
-    {
-        createTable("CREATE TABLE %s (k text, t int, v1 text, v2 int, PRIMARY KEY (k, t))");
-
-        String key = "someKey";
-
-        for (int i = 0; i < 10; i++)
-            execute("INSERT INTO %s(k, t, v1, v2) VALUES (?, ?, ?, ?) USING TIMESTAMP ? AND TTL ?", key, i, "v" + i, i, 1L, 200);
-
-        // ColumnFamily(table_0 [0::false:0@1!200,0:v1:false:2@1!200,0:v2:false:4@1!200,1::false:0@1!200,1:v1:false:2@1!200,1:v2:false:4@1!200,2::false:0@1!200,2:v1:false:2@1!200,2:v2:false:4@1!200,3::false:0@1!200,3:v1:false:2@1!200,3:v2:false:4@1!200,4::false:0@1!200,4:v1:false:2@1!200,4:v2:false:4@1!200,5::false:0@1!200,5:v1:false:2@1!200,5:v2:false:4@1!200,6::false:0@1!200,6:v1:false:2@1!200,6:v2:false:4@1!200,7::false:0@1!200,7:v1:false:2@1!200,7:v2:false:4@1!200,8::false:0@1!200,8:v1:false:2@1!200,8:v2:false:4@1!200,9::false:0@1!200,9:v1:false:2@1!200,9:v2:false:4@1!200,])
-        assertDigest("aa608035cf6574a97061b5c166b64939", readAndDigest(key));
-
-        // This is a cell deletion
-        execute("DELETE v1 FROM %s USING TIMESTAMP ? WHERE k = ? AND t = ?", 2L, key, 2);
-
-        // This is a range tombstone
-        execute("DELETE FROM %s USING TIMESTAMP ? WHERE k = ? AND t = ?", 3L, key, 4);
-
-        // This is a partition level deletion (but we use an older tombstone so it doesn't get rid of everything and keeps the test interesting)
-        execute("DELETE FROM %s USING TIMESTAMP ? WHERE k = ?", 0L, key);
-
-        // ColumnFamily(table_0 -{deletedAt=0, localDeletion=1441012270, ranges=[4:_-4:!, deletedAt=3, localDeletion=1441012270]}- [0::false:0@1!200,0:v1:false:2@1!200,0:v2:false:4@1!200,1::false:0@1!200,1:v1:false:2@1!200,1:v2:false:4@1!200,2::false:0@1!200,2:v1:true:4@2,2:v2:false:4@1!200,3::false:0@1!200,3:v1:false:2@1!200,3:v2:false:4@1!200,5::false:0@1!200,5:v1:false:2@1!200,5:v2:false:4@1!200,6::false:0@1!200,6:v1:false:2@1!200,6:v2:false:4@1!200,7::false:0@1!200,7:v1:false:2@1!200,7:v2:false:4@1!200,8::false:0@1!200,8:v1:false:2@1!200,8:v2:false:4@1!200,9::false:0@1!200,9:v1:false:2@1!200,9:v2:false:4@1!200,])
-        assertDigest("b5f38d2dc7b917d221f98ab1641f82bf", readAndDigest(key));
-    }
-
-    @Test
-    public void testCompactTable() throws Throwable
-    {
-        createTable("CREATE TABLE %s (k text, t int, v text, PRIMARY KEY (k, t)) WITH COMPACT STORAGE");
-
-        String key = "someKey";
-
-        for (int i = 0; i < 10; i++)
-            execute("INSERT INTO %s(k, t, v) VALUES (?, ?, ?) USING TIMESTAMP ? AND TTL ?", key, i, "v" + i, 1L, 200);
-
-        assertDigest("44785ddd7c62c73287b448b6063645e5", readAndDigest(key));
-
-        // This is a cell deletion
-        execute("DELETE FROM %s USING TIMESTAMP ? WHERE k = ? AND t = ?", 2L, key, 2);
-
-        // This is a partition level deletion (but we use an older tombstone so it doesn't get rid of everything and keeps the test interesting)
-        execute("DELETE FROM %s USING TIMESTAMP ? WHERE k = ?", 0L, key);
-
-        assertDigest("55d9bd6335276395d83b18eb17f9abe7", readAndDigest(key));
-    }
-
-    @Test
-    public void testStaticCompactTable() throws Throwable
-    {
-        createTable("CREATE TABLE %s (k text PRIMARY KEY, v1 text, v2 int) WITH COMPACT STORAGE");
-
-        String key = "someKey";
-        execute("INSERT INTO %s(k, v1, v2) VALUES (?, ?, ?) USING TIMESTAMP ?", key, "v", 0, 1L);
-
-        assertDigest("d2080f9f57d6edf92da1fdaaa76573d3", readAndDigest(key));
-    }
-
-    @Test
-    public void testTableWithCollection() throws Throwable
-    {
-        createTable("CREATE TABLE %s (k text PRIMARY KEY, m map<text, text>)");
-
-        String key = "someKey";
-
-        execute("INSERT INTO %s(k, m) VALUES (?, { 'foo' : 'value1', 'bar' : 'value2' }) USING TIMESTAMP ?", key, 1L);
-
-        // ColumnFamily(table_2 -{deletedAt=-9223372036854775808, localDeletion=2147483647, ranges=[m:_-m:!, deletedAt=0, localDeletion=1441012271]}- [:false:0@1,m:626172:false:6@1,m:666f6f:false:6@1,])
-        assertDigest("708f3fc8bc8149cc3513eef300bf0182", readAndDigest(key));
-
-        // This is a collection range tombstone
-        execute("DELETE m FROM %s USING TIMESTAMP ? WHERE k = ?", 2L, key);
-
-        // ColumnFamily(table_2 -{deletedAt=-9223372036854775808, localDeletion=2147483647, ranges=[m:_-m:!, deletedAt=2, localDeletion=1441012271]}- [:false:0@1,])
-        assertDigest("f39937fc3ed96956ef507e81717fa5cd", readAndDigest(key));
-    }
-
-    @Test
-    public void testCounterTable() throws Throwable
-    {
-        /*
-         * We can't use CQL to insert counters as both the timestamp and counter ID are automatically assigned and unpredictable.
-         * So we need to build it ourselves in a way that is totally equivalent between 2.2 and 3.0, which makes the test a little
-         * bit less readable. In any case, the code to generate the equivalent mutation on 2.2 is:
-         * ColumnFamily cf = ArrayBackedSortedColumns.factory.create(getCurrentColumnFamilyStore().metadata);
-         * ByteBuffer value = CounterContext.instance().createGlobal(CounterId.fromInt(1), 1L, 42L);
-         * cf.addColumn(new BufferCounterCell(CellNames.simpleSparse(new ColumnIdentifier("c", true)) , value, 0L, Long.MIN_VALUE));
-         * new Mutation(KEYSPACE, ByteBufferUtil.bytes(key), cf).applyUnsafe();
-         *
-         * Also note that we use COMPACT STORAGE only because it has no bearing on the test and was slightly easier in 2.2 to create
-         * the mutation.
-         */
-
-        createTable("CREATE TABLE %s (k text PRIMARY KEY, c counter) WITH COMPACT STORAGE");
-
-        String key = "someKey";
-
-        CFMetaData metadata = getCurrentColumnFamilyStore().metadata;
-        ColumnDefinition column = metadata.getColumnDefinition(ByteBufferUtil.bytes("c"));
-        ByteBuffer value = CounterContext.instance().createGlobal(CounterId.fromInt(1), 1L, 42L);
-        Row row = BTreeRow.singleCellRow(Clustering.STATIC_CLUSTERING, BufferCell.live(column, 0L, value));
-
-        new Mutation(PartitionUpdate.singleRowUpdate(metadata, Util.dk(key), row)).applyUnsafe();
-
-        assertDigest("3a5f7b48c320538b4cd2f829e05c6db3", readAndDigest(key));
-    }
-}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/hints/LegacyHintsMigratorTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/hints/LegacyHintsMigratorTest.java b/test/unit/org/apache/cassandra/hints/LegacyHintsMigratorTest.java
deleted file mode 100644
index 78849e3..0000000
--- a/test/unit/org/apache/cassandra/hints/LegacyHintsMigratorTest.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.hints;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.file.Files;
-import java.util.*;
-
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import org.apache.cassandra.SchemaLoader;
-import org.apache.cassandra.Util;
-import org.apache.cassandra.config.CFMetaData;
-import org.apache.cassandra.config.Schema;
-import org.apache.cassandra.config.SchemaConstants;
-import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.marshal.UUIDType;
-import org.apache.cassandra.db.partitions.PartitionUpdate;
-import org.apache.cassandra.db.rows.BTreeRow;
-import org.apache.cassandra.db.rows.BufferCell;
-import org.apache.cassandra.db.rows.Cell;
-import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
-
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertNotNull;
-import static junit.framework.Assert.assertTrue;
-
-import static org.apache.cassandra.hints.HintsTestUtil.assertMutationsEqual;
-import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
-
-// TODO: test split into several files
-@SuppressWarnings("deprecation")
-public class LegacyHintsMigratorTest
-{
-    private static final String KEYSPACE = "legacy_hints_migrator_test";
-    private static final String TABLE = "table";
-
-    @BeforeClass
-    public static void defineSchema()
-    {
-        SchemaLoader.prepareServer();
-        SchemaLoader.createKeyspace(KEYSPACE, KeyspaceParams.simple(1), SchemaLoader.standardCFMD(KEYSPACE, TABLE));
-    }
-
-    @Test
-    public void testNothingToMigrate() throws IOException
-    {
-        File directory = Files.createTempDirectory(null).toFile();
-        try
-        {
-            testNothingToMigrate(directory);
-        }
-        finally
-        {
-            directory.deleteOnExit();
-        }
-    }
-
-    private static void testNothingToMigrate(File directory)
-    {
-        // truncate system.hints to ensure nothing is inside
-        Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.LEGACY_HINTS).truncateBlocking();
-        new LegacyHintsMigrator(directory, 128 * 1024 * 1024).migrate();
-        HintsCatalog catalog = HintsCatalog.load(directory, HintsService.EMPTY_PARAMS);
-        assertEquals(0, catalog.stores().count());
-    }
-
-    @Test
-    public void testMigrationIsComplete() throws IOException
-    {
-        File directory = Files.createTempDirectory(null).toFile();
-        try
-        {
-            testMigrationIsComplete(directory);
-        }
-        finally
-        {
-            directory.deleteOnExit();
-        }
-    }
-
-    private static void testMigrationIsComplete(File directory)
-    {
-        long timestamp = System.currentTimeMillis();
-
-        // write 100 mutations for each of the 10 generated endpoints
-        Map<UUID, Queue<Mutation>> mutations = new HashMap<>();
-        for (int i = 0; i < 10; i++)
-        {
-            UUID hostId = UUID.randomUUID();
-            Queue<Mutation> queue = new LinkedList<>();
-            mutations.put(hostId, queue);
-
-            for (int j = 0; j < 100; j++)
-            {
-                Mutation mutation = createMutation(j, timestamp + j);
-                queue.offer(mutation);
-                Mutation legacyHint = createLegacyHint(mutation, timestamp, hostId);
-                legacyHint.applyUnsafe();
-            }
-        }
-
-        // run the migration
-        new LegacyHintsMigrator(directory, 128 * 1024 * 1024).migrate();
-
-        // validate that the hints table is truncated now
-        assertTrue(Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.LEGACY_HINTS).isEmpty());
-
-        HintsCatalog catalog = HintsCatalog.load(directory, HintsService.EMPTY_PARAMS);
-
-        // assert that we've correctly loaded 10 hints stores
-        assertEquals(10, catalog.stores().count());
-
-        // for each of the 10 stores, make sure the mutations have been migrated correctly
-        for (Map.Entry<UUID, Queue<Mutation>> entry : mutations.entrySet())
-        {
-            HintsStore store = catalog.get(entry.getKey());
-            assertNotNull(store);
-
-            HintsDescriptor descriptor = store.poll();
-            assertNotNull(descriptor);
-
-            // read all the hints
-            Queue<Hint> actualHints = new LinkedList<>();
-            try (HintsReader reader = HintsReader.open(new File(directory, descriptor.fileName())))
-            {
-                for (HintsReader.Page page : reader)
-                    page.hintsIterator().forEachRemaining(actualHints::offer);
-            }
-
-            // assert the size matches
-            assertEquals(100, actualHints.size());
-
-            // compare expected hints to actual hints
-            for (int i = 0; i < 100; i++)
-            {
-                Hint hint = actualHints.poll();
-                Mutation mutation = entry.getValue().poll();
-                int ttl = mutation.smallestGCGS();
-
-                assertEquals(timestamp, hint.creationTime);
-                assertEquals(ttl, hint.gcgs);
-                assertTrue(mutation + " != " + hint.mutation, Util.sameContent(mutation, hint.mutation));
-            }
-        }
-    }
-
-    // legacy hint mutation creation code, copied more or less verbatim from the previous implementation
-    private static Mutation createLegacyHint(Mutation mutation, long now, UUID targetId)
-    {
-        int version = MessagingService.VERSION_21;
-        int ttl = mutation.smallestGCGS();
-        UUID hintId = UUIDGen.getTimeUUID();
-
-        ByteBuffer key = UUIDType.instance.decompose(targetId);
-        Clustering clustering = SystemKeyspace.LegacyHints.comparator.make(hintId, version);
-        ByteBuffer value = ByteBuffer.wrap(FBUtilities.serialize(mutation, Mutation.serializer, version));
-        Cell cell = BufferCell.expiring(SystemKeyspace.LegacyHints.compactValueColumn(),
-                                        now,
-                                        ttl,
-                                        FBUtilities.nowInSeconds(),
-                                        value);
-        return new Mutation(PartitionUpdate.singleRowUpdate(SystemKeyspace.LegacyHints,
-                                                            key,
-                                                            BTreeRow.singleCellRow(clustering, cell)));
-    }
-
-    private static Mutation createMutation(int index, long timestamp)
-    {
-        CFMetaData table = Schema.instance.getCFMetaData(KEYSPACE, TABLE);
-        return new RowUpdateBuilder(table, timestamp, bytes(index))
-               .clustering(bytes(index))
-               .add("val", bytes(index))
-               .build();
-    }
-}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriterTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriterTest.java b/test/unit/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriterTest.java
index f19d962..d940186 100644
--- a/test/unit/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriterTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/disk/PerSSTableIndexWriterTest.java
@@ -83,7 +83,7 @@ public class PerSSTableIndexWriterTest extends SchemaLoader
         SASIIndex sasi = (SASIIndex) cfs.indexManager.getIndexByName("age");
 
         File directory = cfs.getDirectories().getDirectoryForNewSSTables();
-        Descriptor descriptor = Descriptor.fromFilename(cfs.getSSTablePath(directory));
+        Descriptor descriptor = cfs.newSSTableDescriptor(directory);
         PerSSTableIndexWriter indexWriter = (PerSSTableIndexWriter) sasi.getFlushObserver(descriptor, OperationType.FLUSH);
 
         SortedMap<DecoratedKey, Row> expectedKeys = new TreeMap<>(DecoratedKey.comparator);
@@ -175,7 +175,7 @@ public class PerSSTableIndexWriterTest extends SchemaLoader
         SASIIndex sasi = (SASIIndex) cfs.indexManager.getIndexByName(columnName);
 
         File directory = cfs.getDirectories().getDirectoryForNewSSTables();
-        Descriptor descriptor = Descriptor.fromFilename(cfs.getSSTablePath(directory));
+        Descriptor descriptor = cfs.newSSTableDescriptor(directory);
         PerSSTableIndexWriter indexWriter = (PerSSTableIndexWriter) sasi.getFlushObserver(descriptor, OperationType.FLUSH);
 
         final long now = System.currentTimeMillis();

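The pattern above replaces the old Descriptor.fromFilename(cfs.getSSTablePath(directory)) dance; roughly (a sketch, assuming cfs is a ColumnFamilyStore):

    File directory = cfs.getDirectories().getDirectoryForNewSSTables();
    Descriptor descriptor = cfs.newSSTableDescriptor(directory);
    // descriptor can then be handed to flush observers, writers, etc.
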
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java b/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
index 0d2a9fb..6359566 100644
--- a/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
@@ -103,9 +103,7 @@ public class CompressedRandomAccessReaderTest
         }
 
         try (FileHandle.Builder builder = new FileHandle.Builder(filename)
-                                                              .withCompressionMetadata(new CompressionMetadata(filename + ".metadata",
-                                                                                                               f.length(),
-                                                                                                               ChecksumType.CRC32));
+                                                              .withCompressionMetadata(new CompressionMetadata(filename + ".metadata", f.length()));
              FileHandle fh = builder.complete();
              RandomAccessReader reader = fh.createReader())
         {
@@ -149,7 +147,7 @@ public class CompressedRandomAccessReaderTest
         }
         assert f.exists();
 
-        CompressionMetadata compressionMetadata = compressed ? new CompressionMetadata(filename + ".metadata", f.length(), ChecksumType.CRC32) : null;
+        CompressionMetadata compressionMetadata = compressed ? new CompressionMetadata(filename + ".metadata", f.length()) : null;
         try (FileHandle.Builder builder = new FileHandle.Builder(filename).mmapped(usemmap).withCompressionMetadata(compressionMetadata);
              FileHandle fh = builder.complete();
              RandomAccessReader reader = fh.createReader())
@@ -197,7 +195,7 @@ public class CompressedRandomAccessReaderTest
         }
 
         // open compression metadata and get chunk information
-        CompressionMetadata meta = new CompressionMetadata(metadata.getPath(), file.length(), ChecksumType.CRC32);
+        CompressionMetadata meta = new CompressionMetadata(metadata.getPath(), file.length());
         CompressionMetadata.Chunk chunk = meta.chunkFor(0);
 
         try (FileHandle.Builder builder = new FileHandle.Builder(file.getPath()).withCompressionMetadata(meta);

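CompressionMetadata no longer takes a ChecksumType argument; the two-argument form used above is, roughly (a sketch; dataPath and dataFile stand in for the test's locals):

    CompressionMetadata meta = new CompressionMetadata(dataPath + ".metadata", dataFile.length());
    try (FileHandle.Builder builder = new FileHandle.Builder(dataPath).withCompressionMetadata(meta);
         FileHandle fh = builder.complete();
         RandomAccessReader reader = fh.createReader())
    {
        // reads through 'reader' decompress transparently
    }
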
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java b/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
index a088e20..ce95d0d 100644
--- a/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
@@ -121,7 +121,7 @@ public class CompressedSequentialWriterTest extends SequentialWriterTest
         }
 
         assert f.exists();
-        try (FileHandle.Builder builder = new FileHandle.Builder(filename).withCompressionMetadata(new CompressionMetadata(filename + ".metadata", f.length(), ChecksumType.CRC32));
+        try (FileHandle.Builder builder = new FileHandle.Builder(filename).withCompressionMetadata(new CompressionMetadata(filename + ".metadata", f.length()));
              FileHandle fh = builder.complete();
              RandomAccessReader reader = fh.createReader())
         {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java
index 78964f4..4985342 100644
--- a/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java
@@ -64,19 +64,19 @@ public class BigTableWriterTest extends AbstractTransactionalTest
 
         private TestableBTW()
         {
-            this(cfs.getSSTablePath(cfs.getDirectories().getDirectoryForNewSSTables()));
+            this(cfs.newSSTableDescriptor(cfs.getDirectories().getDirectoryForNewSSTables()));
         }
 
-        private TestableBTW(String file)
+        private TestableBTW(Descriptor desc)
         {
-            this(file, SSTableTxnWriter.create(cfs, file, 0, 0, new SerializationHeader(true, cfs.metadata, cfs.metadata.partitionColumns(), EncodingStats.NO_STATS)));
+            this(desc, SSTableTxnWriter.create(cfs, desc, 0, 0, new SerializationHeader(true, cfs.metadata, cfs.metadata.partitionColumns(), EncodingStats.NO_STATS)));
         }
 
-        private TestableBTW(String file, SSTableTxnWriter sw)
+        private TestableBTW(Descriptor desc, SSTableTxnWriter sw)
         {
             super(sw);
-            this.file = new File(file);
-            this.descriptor = Descriptor.fromFilename(file);
+            this.file = new File(desc.filenameFor(Component.DATA));
+            this.descriptor = desc;
             this.writer = sw;
 
             for (int i = 0; i < 100; i++)

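SSTableTxnWriter.create now takes the Descriptor directly instead of a file path string; a minimal sketch of the new shape (names illustrative, signature as in the hunk above):

    Descriptor desc = cfs.newSSTableDescriptor(cfs.getDirectories().getDirectoryForNewSSTables());
    SerializationHeader header = new SerializationHeader(true, cfs.metadata, cfs.metadata.partitionColumns(), EncodingStats.NO_STATS);
    try (SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, desc, 0, 0, header))
    {
        // append partitions, then writer.finish(...)
    }
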
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/io/sstable/DescriptorTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/DescriptorTest.java b/test/unit/org/apache/cassandra/io/sstable/DescriptorTest.java
index 64367dc..ef1b785 100644
--- a/test/unit/org/apache/cassandra/io/sstable/DescriptorTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/DescriptorTest.java
@@ -75,27 +75,19 @@ public class DescriptorTest
 
     private void testFromFilenameFor(File dir)
     {
-        // normal
-        checkFromFilename(new Descriptor(dir, ksname, cfname, 1, SSTableFormat.Type.BIG), false);
-        // skip component (for streaming lock file)
-        checkFromFilename(new Descriptor(dir, ksname, cfname, 2, SSTableFormat.Type.BIG), true);
+        checkFromFilename(new Descriptor(dir, ksname, cfname, 1, SSTableFormat.Type.BIG));
 
         // secondary index
         String idxName = "myidx";
         File idxDir = new File(dir.getAbsolutePath() + File.separator + Directories.SECONDARY_INDEX_NAME_SEPARATOR + idxName);
-        checkFromFilename(new Descriptor(idxDir, ksname, cfname + Directories.SECONDARY_INDEX_NAME_SEPARATOR + idxName, 4, SSTableFormat.Type.BIG), false);
-
-        // legacy version
-        checkFromFilename(new Descriptor("ja", dir, ksname, cfname, 1, SSTableFormat.Type.LEGACY), false);
-        // legacy secondary index
-        checkFromFilename(new Descriptor("ja", dir, ksname, cfname + Directories.SECONDARY_INDEX_NAME_SEPARATOR + idxName, 3, SSTableFormat.Type.LEGACY), false);
+        checkFromFilename(new Descriptor(idxDir, ksname, cfname + Directories.SECONDARY_INDEX_NAME_SEPARATOR + idxName, 4, SSTableFormat.Type.BIG));
     }
 
-    private void checkFromFilename(Descriptor original, boolean skipComponent)
+    private void checkFromFilename(Descriptor original)
     {
-        File file = new File(skipComponent ? original.baseFilename() : original.filenameFor(Component.DATA));
+        File file = new File(original.filenameFor(Component.DATA));
 
-        Pair<Descriptor, String> pair = Descriptor.fromFilename(file.getParentFile(), file.getName(), skipComponent);
+        Pair<Descriptor, Component> pair = Descriptor.fromFilenameWithComponent(file);
         Descriptor desc = pair.left;
 
         assertEquals(original.directory, desc.directory);
@@ -103,15 +95,7 @@ public class DescriptorTest
         assertEquals(original.cfname, desc.cfname);
         assertEquals(original.version, desc.version);
         assertEquals(original.generation, desc.generation);
-
-        if (skipComponent)
-        {
-            assertNull(pair.right);
-        }
-        else
-        {
-            assertEquals(Component.DATA.name(), pair.right);
-        }
+        assertEquals(Component.DATA, pair.right);
     }
 
     @Test
@@ -128,20 +112,10 @@ public class DescriptorTest
     @Test
     public void validateNames()
     {
-        // TODO tmp file name probably is not handled correctly after CASSANDRA-7066
         String[] names = {
-             // old formats
-             "system-schema_keyspaces-jb-1-Data.db",
-             //"system-schema_keyspaces-tmp-jb-1-Data.db",
-             "system-schema_keyspaces-ka-1-big-Data.db",
-             //"system-schema_keyspaces-tmp-ka-1-big-Data.db",
-             // 2ndary index
-             "keyspace1-standard1.idx1-ka-1-big-Data.db",
-             // new formats
-             "la-1-big-Data.db",
-             //"tmp-la-1-big-Data.db",
+             "ma-1-big-Data.db",
              // 2ndary index
-             ".idx1" + File.separator + "la-1-big-Data.db",
+             ".idx1" + File.separator + "ma-1-big-Data.db",
         };
 
         for (String name : names)

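Descriptor.fromFilenameWithComponent subsumes the old skipComponent branch: it always parses the component and returns it as a Component rather than a String. Roughly (a sketch based on the hunk above):

    File file = new File(desc.filenameFor(Component.DATA));
    Pair<Descriptor, Component> pair = Descriptor.fromFilenameWithComponent(file);
    Descriptor parsed = pair.left;       // directory, ksname, cfname, version, generation
    Component component = pair.right;    // e.g. Component.DATA
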
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/io/sstable/IndexSummaryTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/IndexSummaryTest.java b/test/unit/org/apache/cassandra/io/sstable/IndexSummaryTest.java
index f3757a0..d21b3f8 100644
--- a/test/unit/org/apache/cassandra/io/sstable/IndexSummaryTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/IndexSummaryTest.java
@@ -85,13 +85,13 @@ public class IndexSummaryTest
     {
         Pair<List<DecoratedKey>, IndexSummary> random = generateRandomIndex(100, 1);
         DataOutputBuffer dos = new DataOutputBuffer();
-        IndexSummary.serializer.serialize(random.right, dos, false);
+        IndexSummary.serializer.serialize(random.right, dos);
         // write junk
         dos.writeUTF("JUNK");
         dos.writeUTF("JUNK");
         FileUtils.closeQuietly(dos);
         DataInputStream dis = new DataInputStream(new ByteArrayInputStream(dos.toByteArray()));
-        IndexSummary is = IndexSummary.serializer.deserialize(dis, partitioner, false, 1, 1);
+        IndexSummary is = IndexSummary.serializer.deserialize(dis, partitioner, 1, 1);
         for (int i = 0; i < 100; i++)
             assertEquals(i, is.binarySearch(random.left.get(i)));
         // read the junk
@@ -115,9 +115,9 @@ public class IndexSummaryTest
             assertArrayEquals(new byte[0], summary.getKey(0));
 
             DataOutputBuffer dos = new DataOutputBuffer();
-            IndexSummary.serializer.serialize(summary, dos, false);
+            IndexSummary.serializer.serialize(summary, dos);
             DataInputStream dis = new DataInputStream(new ByteArrayInputStream(dos.toByteArray()));
-            IndexSummary loaded = IndexSummary.serializer.deserialize(dis, p, false, 1, 1);
+            IndexSummary loaded = IndexSummary.serializer.deserialize(dis, p, 1, 1);
 
             assertEquals(1, loaded.size());
             assertEquals(summary.getPosition(0), loaded.getPosition(0));

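The IndexSummary serializer loses its sampling-level boolean on both sides; the round trip now looks roughly like this (a sketch; the trailing 1, 1 are the interval arguments used in the test above):

    DataOutputBuffer out = new DataOutputBuffer();
    IndexSummary.serializer.serialize(summary, out);
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(out.toByteArray()));
    IndexSummary loaded = IndexSummary.serializer.deserialize(in, partitioner, 1, 1);
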
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java b/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
index ef993bc..8996f2a 100644
--- a/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
@@ -71,7 +71,7 @@ public class LegacySSTableTest
      * See {@link #testGenerateSstables()} to generate sstables.
      * Take care on commit as you need to add the sstable files using {@code git add -f}
      */
-    public static final String[] legacyVersions = {"mc", "mb", "ma", "la", "ka", "jb"};
+    public static final String[] legacyVersions = {"mc", "mb", "ma"};
 
     // 1200 chars
     static final String longString = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +
@@ -121,9 +121,12 @@ public class LegacySSTableTest
      */
     protected Descriptor getDescriptor(String legacyVersion, String table)
     {
-        return new Descriptor(legacyVersion, getTableDir(legacyVersion, table), "legacy_tables", table, 1,
-                              BigFormat.instance.getVersion(legacyVersion).hasNewFileName()?
-                              SSTableFormat.Type.BIG :SSTableFormat.Type.LEGACY);
+        return new Descriptor(SSTableFormat.Type.BIG.info.getVersion(legacyVersion),
+                              getTableDir(legacyVersion, table),
+                              "legacy_tables",
+                              table,
+                              1,
+                              SSTableFormat.Type.BIG);
     }
 
     @Test
@@ -242,10 +245,7 @@ public class LegacySSTableTest
         CacheService.instance.invalidateKeyCache();
         Assert.assertEquals(startCount, CacheService.instance.keyCache.size());
         CacheService.instance.keyCache.loadSaved();
-        if (BigFormat.instance.getVersion(legacyVersion).storeRows())
-            Assert.assertEquals(endCount, CacheService.instance.keyCache.size());
-        else
-            Assert.assertEquals(startCount, CacheService.instance.keyCache.size());
+        Assert.assertEquals(endCount, CacheService.instance.keyCache.size());
     }
 
     private static void verifyReads(String legacyVersion)

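The Descriptor constructor now wants a parsed Version rather than a raw version string, and SSTableFormat.Type.LEGACY is gone; constructing one for a fixed on-disk version looks roughly like (a sketch mirroring getDescriptor above, tableDir and table being illustrative):

    SSTableFormat.Type type = SSTableFormat.Type.BIG;
    Descriptor desc = new Descriptor(type.info.getVersion("ma"),   // Version object, not a String
                                     tableDir, "legacy_tables", table, 1, type);
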
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java
index 942ebe9..238dbd0 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java
@@ -933,9 +933,9 @@ public class SSTableRewriterTest extends SSTableWriterTestBase
         for (int f = 0 ; f < fileCount ; f++)
         {
             File dir = cfs.getDirectories().getDirectoryForNewSSTables();
-            String filename = cfs.getSSTablePath(dir);
+            Descriptor desc = cfs.newSSTableDescriptor(dir);
 
-            try (SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, filename, 0, 0, new SerializationHeader(true, cfs.metadata, cfs.metadata.partitionColumns(), EncodingStats.NO_STATS)))
+            try (SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, desc, 0, 0, new SerializationHeader(true, cfs.metadata, cfs.metadata.partitionColumns(), EncodingStats.NO_STATS)))
             {
                 int end = f == fileCount - 1 ? partitionCount : ((f + 1) * partitionCount) / fileCount;
                 for ( ; i < end ; i++)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java b/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java
index df9d1aa..90b1857 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableUtils.java
@@ -217,7 +217,7 @@ public class SSTableUtils
             CFMetaData cfm = Schema.instance.getCFMetaData(ksname, cfname);
             ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(cfm.cfId);
             SerializationHeader header = appender.header();
-            SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, datafile.getAbsolutePath(), expectedSize, ActiveRepairService.UNREPAIRED_SSTABLE, 0, header);
+            SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, Descriptor.fromFilename(datafile.getAbsolutePath()), expectedSize, ActiveRepairService.UNREPAIRED_SSTABLE, 0, header);
             while (appender.append(writer)) { /* pass */ }
             Collection<SSTableReader> readers = writer.finish(true);
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java
index a123a22..c1f11b6 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java
@@ -163,8 +163,8 @@ public class SSTableWriterTestBase extends SchemaLoader
 
     public static SSTableWriter getWriter(ColumnFamilyStore cfs, File directory, LifecycleTransaction txn)
     {
-        String filename = cfs.getSSTablePath(directory);
-        return SSTableWriter.create(filename, 0, 0, new SerializationHeader(true, cfs.metadata, cfs.metadata.partitionColumns(), EncodingStats.NO_STATS), cfs.indexManager.listIndexes(), txn);
+        Descriptor desc = cfs.newSSTableDescriptor(directory);
+        return SSTableWriter.create(desc, 0, 0, new SerializationHeader(true, cfs.metadata, cfs.metadata.partitionColumns(), EncodingStats.NO_STATS), cfs.indexManager.listIndexes(), txn);
     }
 
     public static ByteBuffer random(int i, int size)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/io/sstable/format/ClientModeSSTableTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/format/ClientModeSSTableTest.java b/test/unit/org/apache/cassandra/io/sstable/format/ClientModeSSTableTest.java
deleted file mode 100644
index 7a741f9..0000000
--- a/test/unit/org/apache/cassandra/io/sstable/format/ClientModeSSTableTest.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.io.sstable.format;
-
-import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
-
-import java.io.File;
-import java.nio.ByteBuffer;
-
-import com.google.common.util.concurrent.Runnables;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import org.apache.cassandra.concurrent.ScheduledExecutors;
-import org.apache.cassandra.config.CFMetaData;
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.db.Slices;
-import org.apache.cassandra.db.filter.ColumnFilter;
-import org.apache.cassandra.db.marshal.BytesType;
-import org.apache.cassandra.db.rows.UnfilteredRowIterator;
-import org.apache.cassandra.dht.ByteOrderedPartitioner;
-import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.io.sstable.Descriptor;
-
-/**
- * Tests backwards compatibility for SSTables
- */
-public class ClientModeSSTableTest
-{
-    public static final String LEGACY_SSTABLE_PROP = "legacy-sstable-root";
-    public static final String KSNAME = "Keyspace1";
-    public static final String CFNAME = "Standard1";
-
-    public static File LEGACY_SSTABLE_ROOT;
-
-    static CFMetaData metadata;
-
-    @BeforeClass
-    public static void defineSchema() throws ConfigurationException
-    {
-        DatabaseDescriptor.toolInitialization();
-
-        metadata = CFMetaData.Builder.createDense(KSNAME, CFNAME, false, false)
-                                                .addPartitionKey("key", BytesType.instance)
-                                                .addClusteringColumn("column", BytesType.instance)
-                                                .addRegularColumn("value", BytesType.instance)
-                                                .withPartitioner(ByteOrderedPartitioner.instance)
-                                                .build();
-
-        String scp = System.getProperty(LEGACY_SSTABLE_PROP);
-        assert scp != null;
-        LEGACY_SSTABLE_ROOT = new File(scp).getAbsoluteFile();
-        assert LEGACY_SSTABLE_ROOT.isDirectory();
-    }
-
-    /**
-     * Get a descriptor for the legacy sstable at the given version.
-     */
-    protected Descriptor getDescriptor(String ver)
-    {
-        File directory = new File(LEGACY_SSTABLE_ROOT + File.separator + ver + File.separator + KSNAME);
-        return new Descriptor(ver, directory, KSNAME, CFNAME, 0, SSTableFormat.Type.LEGACY);
-    }
-
-    @Test
-    public void testVersions() throws Throwable
-    {
-        boolean notSkipped = false;
-
-        for (File version : LEGACY_SSTABLE_ROOT.listFiles())
-        {
-            if (!new File(LEGACY_SSTABLE_ROOT + File.separator + version.getName() + File.separator + KSNAME).isDirectory())
-                continue;
-            if (Version.validate(version.getName()) && SSTableFormat.Type.LEGACY.info.getVersion(version.getName()).isCompatible())
-            {
-                notSkipped = true;
-                testVersion(version.getName());
-            }
-        }
-
-        assert notSkipped;
-    }
-
-    public void testVersion(String version) throws Throwable
-    {
-        SSTableReader reader = null;
-        try
-        {
-            reader = SSTableReader.openNoValidation(getDescriptor(version), metadata);
-
-            ByteBuffer key = bytes(Integer.toString(100));
-
-            try (UnfilteredRowIterator iter = reader.iterator(metadata.decorateKey(key), Slices.ALL, ColumnFilter.selection(metadata.partitionColumns()), false, false))
-            {
-                assert iter.next().clustering().get(0).equals(key);
-            }
-        }
-        catch (Throwable e)
-        {
-            System.err.println("Failed to read " + version);
-            throw e;
-        }
-        finally
-        {
-            if (reader != null)
-            {
-                int globalTidyCount = SSTableReader.GlobalTidy.lookup.size();
-                reader.selfRef().release();
-                assert reader.selfRef().globalCount() == 0;
-
-                // await clean-up to complete if started.
-                ScheduledExecutors.nonPeriodicTasks.submit(Runnables.doNothing()).get();
-                // Ensure clean-up completed.
-                assert SSTableReader.GlobalTidy.lookup.size() < globalTidyCount;
-            }
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/io/sstable/format/SSTableFlushObserverTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/format/SSTableFlushObserverTest.java b/test/unit/org/apache/cassandra/io/sstable/format/SSTableFlushObserverTest.java
index f4c2f46..505d45d 100644
--- a/test/unit/org/apache/cassandra/io/sstable/format/SSTableFlushObserverTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/format/SSTableFlushObserverTest.java
@@ -89,7 +89,7 @@ public class SSTableFlushObserverTest
 
         SSTableFormat.Type sstableFormat = SSTableFormat.Type.current();
 
-        BigTableWriter writer = new BigTableWriter(new Descriptor(sstableFormat.info.getLatestVersion().version,
+        BigTableWriter writer = new BigTableWriter(new Descriptor(sstableFormat.info.getLatestVersion(),
                                                                   directory,
                                                                   KS_NAME, CF_NAME,
                                                                   0,

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java b/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java
index 9df3e11..79249b6 100644
--- a/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/metadata/MetadataSerializerTest.java
@@ -102,12 +102,6 @@ public class MetadataSerializerTest
     }
 
     @Test
-    public void testLaReadLb() throws IOException
-    {
-        testOldReadsNew("la", "lb");
-    }
-
-    @Test
     public void testMaReadMb() throws IOException
     {
         testOldReadsNew("ma", "mb");
@@ -134,7 +128,8 @@ public class MetadataSerializerTest
         File statsFileLb = serialize(originalMetadata, serializer, BigFormat.instance.getVersion(newV));
         File statsFileLa = serialize(originalMetadata, serializer, BigFormat.instance.getVersion(oldV));
         // Reading both as earlier version should yield identical results.
-        Descriptor desc = new Descriptor(oldV, statsFileLb.getParentFile(), "", "", 0, SSTableFormat.Type.current());
+        SSTableFormat.Type stype = SSTableFormat.Type.current();
+        Descriptor desc = new Descriptor(stype.info.getVersion(oldV), statsFileLb.getParentFile(), "", "", 0, stype);
         try (RandomAccessReader inLb = RandomAccessReader.open(statsFileLb);
              RandomAccessReader inLa = RandomAccessReader.open(statsFileLa))
         {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java b/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java
index 39c9689..f6e97fb 100644
--- a/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java
+++ b/test/unit/org/apache/cassandra/io/util/MmappedRegionsTest.java
@@ -314,7 +314,7 @@ public class MmappedRegionsTest
             writer.finish();
         }
 
-        CompressionMetadata metadata = new CompressionMetadata(cf.getAbsolutePath(), f.length(), ChecksumType.CRC32);
+        CompressionMetadata metadata = new CompressionMetadata(cf.getAbsolutePath(), f.length());
         try(ChannelProxy channel = new ChannelProxy(f);
             MmappedRegions regions = MmappedRegions.map(channel, metadata))
         {


http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java b/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
index b405fad..019e053 100644
--- a/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
+++ b/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
@@ -215,7 +215,7 @@ public class SSTableMetadataViewer
 
         try (DataInputStream iStream = new DataInputStream(new FileInputStream(summariesFile)))
         {
-            Pair<DecoratedKey, DecoratedKey> firstLast = new IndexSummary.IndexSummarySerializer().deserializeFirstLastKey(iStream, partitioner, descriptor.version.hasSamplingLevel());
+            Pair<DecoratedKey, DecoratedKey> firstLast = new IndexSummary.IndexSummarySerializer().deserializeFirstLastKey(iStream, partitioner);
             out.printf("First token: %s (key=%s)%n", firstLast.left.getToken(), keyType.getString(firstLast.left.getKey()));
             out.printf("Last token: %s (key=%s)%n", firstLast.right.getToken(), keyType.getString(firstLast.right.getKey()));
         }

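deserializeFirstLastKey now takes only the stream and the partitioner; the sampling-level flag is gone along with the formats that lacked it. A minimal sketch of reading just the first and last keys out of a -Summary.db component (path and partitioner are supplied by the caller):

    import java.io.DataInputStream;
    import java.io.FileInputStream;
    import java.io.IOException;

    import org.apache.cassandra.db.DecoratedKey;
    import org.apache.cassandra.dht.IPartitioner;
    import org.apache.cassandra.io.sstable.IndexSummary;
    import org.apache.cassandra.utils.Pair;

    public class SummaryBounds
    {
        // Reads only the first/last keys; the rest of the summary is not deserialized.
        static Pair<DecoratedKey, DecoratedKey> firstLast(String summaryPath, IPartitioner partitioner) throws IOException
        {
            try (DataInputStream in = new DataInputStream(new FileInputStream(summaryPath)))
            {
                return new IndexSummary.IndexSummarySerializer().deserializeFirstLastKey(in, partitioner);
            }
        }
    }
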
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/tools/SSTableRepairedAtSetter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/tools/SSTableRepairedAtSetter.java b/src/java/org/apache/cassandra/tools/SSTableRepairedAtSetter.java
index 413ec4d..b97960a 100644
--- a/src/java/org/apache/cassandra/tools/SSTableRepairedAtSetter.java
+++ b/src/java/org/apache/cassandra/tools/SSTableRepairedAtSetter.java
@@ -82,21 +82,20 @@ public class SSTableRepairedAtSetter
         for (String fname: fileNames)
         {
             Descriptor descriptor = Descriptor.fromFilename(fname);
-            if (descriptor.version.hasRepairedAt())
+            if (!descriptor.version.isCompatible())
             {
-                if (setIsRepaired)
-                {
-                    FileTime f = Files.getLastModifiedTime(new File(descriptor.filenameFor(Component.DATA)).toPath());
-                    descriptor.getMetadataSerializer().mutateRepairedAt(descriptor, f.toMillis());
-                }
-                else
-                {
-                    descriptor.getMetadataSerializer().mutateRepairedAt(descriptor, ActiveRepairService.UNREPAIRED_SSTABLE);
-                }
+                System.err.println("SSTable " + fname + " is in an old and unsupported format");
+                continue;
+            }
+
+            if (setIsRepaired)
+            {
+                FileTime f = Files.getLastModifiedTime(new File(descriptor.filenameFor(Component.DATA)).toPath());
+                descriptor.getMetadataSerializer().mutateRepairedAt(descriptor, f.toMillis());
             }
             else
             {
-                System.err.println("SSTable " + fname + " does not have repaired property, run upgradesstables");
+                descriptor.getMetadataSerializer().mutateRepairedAt(descriptor, ActiveRepairService.UNREPAIRED_SSTABLE);
             }
         }
     }

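The control flow above inverts the old check: instead of asking whether the format has a repairedAt field, it rejects incompatible formats up front and then mutates unconditionally. The same pattern as a standalone sketch; throwing instead of the tool's System.err/continue is this sketch's own choice:

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;

    import org.apache.cassandra.io.sstable.Component;
    import org.apache.cassandra.io.sstable.Descriptor;
    import org.apache.cassandra.service.ActiveRepairService;

    public class RepairedAtSketch
    {
        static void setRepaired(String fname, boolean repaired) throws IOException
        {
            Descriptor descriptor = Descriptor.fromFilename(fname);

            // 4.0 refuses old formats outright rather than probing for a repairedAt field.
            if (!descriptor.version.isCompatible())
                throw new IllegalArgumentException("SSTable " + fname + " is in an old and unsupported format");

            long repairedAt = repaired
                            ? Files.getLastModifiedTime(new File(descriptor.filenameFor(Component.DATA)).toPath()).toMillis()
                            : ActiveRepairService.UNREPAIRED_SSTABLE;
            descriptor.getMetadataSerializer().mutateRepairedAt(descriptor, repairedAt);
        }
    }
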
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/tools/StandaloneSplitter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/tools/StandaloneSplitter.java b/src/java/org/apache/cassandra/tools/StandaloneSplitter.java
index 1e57ff4..aaaa9db 100644
--- a/src/java/org/apache/cassandra/tools/StandaloneSplitter.java
+++ b/src/java/org/apache/cassandra/tools/StandaloneSplitter.java
@@ -70,12 +70,11 @@ public class StandaloneSplitter
                     continue;
                 }
 
-                Pair<Descriptor, Component> pair = SSTable.tryComponentFromFilename(file.getParentFile(), file.getName());
-                if (pair == null) {
+                Descriptor desc = SSTable.tryDescriptorFromFilename(file);
+                if (desc == null) {
                     System.out.println("Skipping non sstable file " + file);
                     continue;
                 }
-                Descriptor desc = pair.left;
 
                 if (ksName == null)
                     ksName = desc.ksname;

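SSTable.tryDescriptorFromFilename collapses the old Pair<Descriptor, Component> probe into a single nullable Descriptor. A minimal sketch of the filtering idiom over a directory of candidate files:

    import java.io.File;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.cassandra.io.sstable.Descriptor;
    import org.apache.cassandra.io.sstable.SSTable;

    public class SSTableScan
    {
        // Collect descriptors for everything in dir that parses as an sstable file.
        static List<Descriptor> sstablesIn(File dir)
        {
            List<Descriptor> found = new ArrayList<>();
            File[] files = dir.listFiles();
            if (files == null)
                return found; // not a directory (or I/O error)

            for (File file : files)
            {
                Descriptor desc = SSTable.tryDescriptorFromFilename(file);
                if (desc == null)
                    continue; // not an sstable component
                found.add(desc);
            }
            return found;
        }
    }
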
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/utils/BloomFilter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/utils/BloomFilter.java b/src/java/org/apache/cassandra/utils/BloomFilter.java
index 4ff07b7..bc52c09 100644
--- a/src/java/org/apache/cassandra/utils/BloomFilter.java
+++ b/src/java/org/apache/cassandra/utils/BloomFilter.java
@@ -37,18 +37,12 @@ public class BloomFilter extends WrappedSharedCloseable implements IFilter
 
     public final IBitSet bitset;
     public final int hashCount;
-    /**
-     * CASSANDRA-8413: 3.0 (inverted) bloom filters have no 'static' bits caused by using the same upper bits
-     * for both bloom filter and token distribution.
-     */
-    public final boolean oldBfHashOrder;
 
-    BloomFilter(int hashCount, IBitSet bitset, boolean oldBfHashOrder)
+    BloomFilter(int hashCount, IBitSet bitset)
     {
         super(bitset);
         this.hashCount = hashCount;
         this.bitset = bitset;
-        this.oldBfHashOrder = oldBfHashOrder;
     }
 
     private BloomFilter(BloomFilter copy)
@@ -56,7 +50,6 @@ public class BloomFilter extends WrappedSharedCloseable implements IFilter
         super(copy);
         this.hashCount = copy.hashCount;
         this.bitset = copy.bitset;
-        this.oldBfHashOrder = copy.oldBfHashOrder;
     }
 
     public long serializedSize()
@@ -101,13 +94,6 @@ public class BloomFilter extends WrappedSharedCloseable implements IFilter
     @Inline
     private void setIndexes(long base, long inc, int count, long max, long[] results)
     {
-        if (oldBfHashOrder)
-        {
-            long x = inc;
-            inc = base;
-            base = x;
-        }
-
         for (int i = 0; i < count; i++)
         {
             results[i] = FBUtilities.abs(base % max);
@@ -155,7 +141,7 @@ public class BloomFilter extends WrappedSharedCloseable implements IFilter
 
     public String toString()
     {
-        return "BloomFilter[hashCount=" + hashCount + ";oldBfHashOrder=" + oldBfHashOrder + ";capacity=" + bitset.capacity() + ']';
+        return "BloomFilter[hashCount=" + hashCount + ";capacity=" + bitset.capacity() + ']';
     }
 
     public void addTo(Ref.IdentityCollection identities)

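With oldBfHashOrder gone, index derivation is unconditional double hashing: the i-th probe is abs((base + i*inc) mod max), computed incrementally with no (base, inc) swap for pre-3.0 filters. A self-contained restatement of the loop above:

    import org.apache.cassandra.utils.FBUtilities;

    public class IndexSketch
    {
        // Derive `count` bit positions in [0, max) from the two hash halves.
        static void setIndexes(long base, long inc, int count, long max, long[] results)
        {
            for (int i = 0; i < count; i++)
            {
                results[i] = FBUtilities.abs(base % max);
                base += inc;
            }
        }
    }
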
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/utils/BloomFilterSerializer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/utils/BloomFilterSerializer.java b/src/java/org/apache/cassandra/utils/BloomFilterSerializer.java
index 6f57fc8..17ab123 100644
--- a/src/java/org/apache/cassandra/utils/BloomFilterSerializer.java
+++ b/src/java/org/apache/cassandra/utils/BloomFilterSerializer.java
@@ -38,18 +38,18 @@ final class BloomFilterSerializer
         bf.bitset.serialize(out);
     }
 
-    public static BloomFilter deserialize(DataInput in, boolean oldBfHashOrder) throws IOException
+    public static BloomFilter deserialize(DataInput in) throws IOException
     {
-        return deserialize(in, false, oldBfHashOrder);
+        return deserialize(in, false);
     }
 
     @SuppressWarnings("resource")
-    public static BloomFilter deserialize(DataInput in, boolean offheap, boolean oldBfHashOrder) throws IOException
+    public static BloomFilter deserialize(DataInput in, boolean offheap) throws IOException
     {
         int hashes = in.readInt();
         IBitSet bs = offheap ? OffHeapBitSet.deserialize(in) : OpenBitSet.deserialize(in);
 
-        return new BloomFilter(hashes, bs, oldBfHashOrder);
+        return new BloomFilter(hashes, bs);
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/utils/FilterFactory.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/utils/FilterFactory.java b/src/java/org/apache/cassandra/utils/FilterFactory.java
index ddcf1bb..f79f720 100644
--- a/src/java/org/apache/cassandra/utils/FilterFactory.java
+++ b/src/java/org/apache/cassandra/utils/FilterFactory.java
@@ -40,16 +40,16 @@ public class FilterFactory
         BloomFilterSerializer.serialize((BloomFilter) bf, output);
     }
 
-    public static IFilter deserialize(DataInput input, boolean offheap, boolean oldBfHashOrder) throws IOException
+    public static IFilter deserialize(DataInput input, boolean offheap) throws IOException
     {
-        return BloomFilterSerializer.deserialize(input, offheap, oldBfHashOrder);
+        return BloomFilterSerializer.deserialize(input, offheap);
     }
 
     /**
      * @return A BloomFilter with the lowest practical false positive
      *         probability for the given number of elements.
      */
-    public static IFilter getFilter(long numElements, int targetBucketsPerElem, boolean offheap, boolean oldBfHashOrder)
+    public static IFilter getFilter(long numElements, int targetBucketsPerElem, boolean offheap)
     {
         int maxBucketsPerElement = Math.max(1, BloomCalculations.maxBucketsPerElement(numElements));
         int bucketsPerElement = Math.min(targetBucketsPerElem, maxBucketsPerElement);
@@ -58,7 +58,7 @@ public class FilterFactory
             logger.warn("Cannot provide an optimal BloomFilter for {} elements ({}/{} buckets per element).", numElements, bucketsPerElement, targetBucketsPerElem);
         }
         BloomCalculations.BloomSpecification spec = BloomCalculations.computeBloomSpec(bucketsPerElement);
-        return createFilter(spec.K, numElements, spec.bucketsPerElement, offheap, oldBfHashOrder);
+        return createFilter(spec.K, numElements, spec.bucketsPerElement, offheap);
     }
 
     /**
@@ -68,21 +68,21 @@ public class FilterFactory
      *         Asserts that the given probability can be satisfied using this
      *         filter.
      */
-    public static IFilter getFilter(long numElements, double maxFalsePosProbability, boolean offheap, boolean oldBfHashOrder)
+    public static IFilter getFilter(long numElements, double maxFalsePosProbability, boolean offheap)
     {
         assert maxFalsePosProbability <= 1.0 : "Invalid probability";
         if (maxFalsePosProbability == 1.0)
             return new AlwaysPresentFilter();
         int bucketsPerElement = BloomCalculations.maxBucketsPerElement(numElements);
         BloomCalculations.BloomSpecification spec = BloomCalculations.computeBloomSpec(bucketsPerElement, maxFalsePosProbability);
-        return createFilter(spec.K, numElements, spec.bucketsPerElement, offheap, oldBfHashOrder);
+        return createFilter(spec.K, numElements, spec.bucketsPerElement, offheap);
     }
 
     @SuppressWarnings("resource")
-    private static IFilter createFilter(int hash, long numElements, int bucketsPer, boolean offheap, boolean oldBfHashOrder)
+    private static IFilter createFilter(int hash, long numElements, int bucketsPer, boolean offheap)
     {
         long numBits = (numElements * bucketsPer) + BITSET_EXCESS;
         IBitSet bitset = offheap ? new OffHeapBitSet(numBits) : new OpenBitSet(numBits);
-        return new BloomFilter(hash, bitset, oldBfHashOrder);
+        return new BloomFilter(hash, bitset);
     }
 }

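Filter creation and (de)serialization now carry one fewer flag everywhere. A minimal round-trip sketch using the post-commit FilterFactory entry points; the element count and false-positive target are arbitrary, and the use of DataOutputBuffer as the serialization target is this sketch's assumption:

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;

    import org.apache.cassandra.io.util.DataOutputBuffer;
    import org.apache.cassandra.utils.FilterFactory;
    import org.apache.cassandra.utils.IFilter;

    public class FilterRoundTrip
    {
        static IFilter roundTrip() throws IOException
        {
            // One fewer knob: callers no longer say which hash order the filter uses.
            IFilter bf = FilterFactory.getFilter(1_000_000L, 0.01, true);
            try (DataOutputBuffer out = new DataOutputBuffer())
            {
                FilterFactory.serialize(bf, out);
                DataInputStream in = new DataInputStream(new ByteArrayInputStream(out.toByteArray()));
                return FilterFactory.deserialize(in, true);
            }
        }
    }
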
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/utils/MerkleTree.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/utils/MerkleTree.java b/src/java/org/apache/cassandra/utils/MerkleTree.java
index ba36345..4ada91c 100644
--- a/src/java/org/apache/cassandra/utils/MerkleTree.java
+++ b/src/java/org/apache/cassandra/utils/MerkleTree.java
@@ -845,16 +845,6 @@ public class MerkleTree implements Serializable
         {
             public void serialize(Inner inner, DataOutputPlus out, int version) throws IOException
             {
-                if (version < MessagingService.VERSION_30)
-                {
-                    if (inner.hash == null)
-                        out.writeInt(-1);
-                    else
-                    {
-                        out.writeInt(inner.hash.length);
-                        out.write(inner.hash);
-                    }
-                }
                 Token.serializer.serialize(inner.token, out, version);
                 Hashable.serializer.serialize(inner.lchild, out, version);
                 Hashable.serializer.serialize(inner.rchild, out, version);
@@ -862,13 +852,6 @@ public class MerkleTree implements Serializable
 
             public Inner deserialize(DataInput in, IPartitioner p, int version) throws IOException
             {
-                if (version < MessagingService.VERSION_30)
-                {
-                    int hashLen = in.readInt();
-                    byte[] hash = hashLen >= 0 ? new byte[hashLen] : null;
-                    if (hash != null)
-                        in.readFully(hash);
-                }
                 Token token = Token.serializer.deserialize(in, p, version);
                 Hashable lchild = Hashable.serializer.deserialize(in, p, version);
                 Hashable rchild = Hashable.serializer.deserialize(in, p, version);
@@ -877,18 +860,9 @@ public class MerkleTree implements Serializable
 
             public long serializedSize(Inner inner, int version)
             {
-                long size = 0;
-                if (version < MessagingService.VERSION_30)
-                {
-                    size += inner.hash == null
-                                       ? TypeSizes.sizeof(-1)
-                                       : TypeSizes.sizeof(inner.hash().length) + inner.hash().length;
-                }
-
-                size += Token.serializer.serializedSize(inner.token, version)
-                + Hashable.serializer.serializedSize(inner.lchild, version)
-                + Hashable.serializer.serializedSize(inner.rchild, version);
-                return size;
+                return Token.serializer.serializedSize(inner.token, version)
+                     + Hashable.serializer.serializedSize(inner.lchild, version)
+                     + Hashable.serializer.serializedSize(inner.rchild, version);
             }
         }
     }
@@ -938,24 +912,18 @@ public class MerkleTree implements Serializable
             {
                 if (leaf.hash == null)
                 {
-                    if (version < MessagingService.VERSION_30)
-                        out.writeInt(-1);
-                    else
-                        out.writeByte(-1);
+                    out.writeByte(-1);
                 }
                 else
                 {
-                    if (version < MessagingService.VERSION_30)
-                        out.writeInt(leaf.hash.length);
-                    else
-                        out.writeByte(leaf.hash.length);
+                    out.writeByte(leaf.hash.length);
                     out.write(leaf.hash);
                 }
             }
 
             public Leaf deserialize(DataInput in, IPartitioner p, int version) throws IOException
             {
-                int hashLen = version < MessagingService.VERSION_30 ? in.readInt() : in.readByte();
+                int hashLen = in.readByte();
                 byte[] hash = hashLen < 0 ? null : new byte[hashLen];
                 if (hash != null)
                     in.readFully(hash);
@@ -964,11 +932,9 @@ public class MerkleTree implements Serializable
 
             public long serializedSize(Leaf leaf, int version)
             {
-                long size = version < MessagingService.VERSION_30 ? TypeSizes.sizeof(1) : 1;
+                long size = 1;
                 if (leaf.hash != null)
-                {
                     size += leaf.hash().length;
-                }
                 return size;
             }
         }

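After the cut, a leaf hash is framed by a single signed length byte (-1 encodes a null hash), matching serializedSize's fixed 1-byte overhead; hash lengths are assumed to fit in a signed byte, as the tree's hashes do. A dependency-free sketch of the framing:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    public class LeafFramingSketch
    {
        static void writeLeafHash(byte[] hash, DataOutput out) throws IOException
        {
            if (hash == null)
            {
                out.writeByte(-1); // null hash marker
                return;
            }
            out.writeByte(hash.length);
            out.write(hash);
        }

        static byte[] readLeafHash(DataInput in) throws IOException
        {
            int len = in.readByte(); // negative means null hash
            if (len < 0)
                return null;
            byte[] hash = new byte[len];
            in.readFully(hash);
            return hash;
        }
    }
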
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-CompressionInfo.db b/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-CompressionInfo.db
deleted file mode 100644
index 307eeb3..0000000
Binary files a/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Data.db
----------------------------------------------------------------------
diff --git a/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Data.db b/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Data.db
deleted file mode 100644
index 175a5b6..0000000
Binary files a/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Digest.adler32
----------------------------------------------------------------------
diff --git a/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Digest.adler32 b/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Digest.adler32
deleted file mode 100644
index ad624d2..0000000
--- a/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Digest.adler32
+++ /dev/null
@@ -1 +0,0 @@
-408097082
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Filter.db b/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Filter.db
deleted file mode 100644
index 00a88b4..0000000
Binary files a/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Index.db
----------------------------------------------------------------------
diff --git a/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Index.db b/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Index.db
deleted file mode 100644
index c3b42d8..0000000
Binary files a/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Statistics.db b/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Statistics.db
deleted file mode 100644
index 056cf17..0000000
Binary files a/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Summary.db b/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Summary.db
deleted file mode 100644
index 453753f..0000000
Binary files a/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-TOC.txt b/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-TOC.txt
deleted file mode 100644
index ceb1dab..0000000
--- a/test/data/invalid-legacy-sstables/Keyspace1/cf_with_duplicates_2_0/lb-1-big-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-CompressionInfo.db
-Digest.adler32
-TOC.txt
-Filter.db
-Data.db
-Index.db
-Statistics.db
-Summary.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.0/CommitLog-3-1431528750790.log
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.0/CommitLog-3-1431528750790.log b/test/data/legacy-commitlog/2.0/CommitLog-3-1431528750790.log
deleted file mode 100644
index 3301331..0000000
Binary files a/test/data/legacy-commitlog/2.0/CommitLog-3-1431528750790.log and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.0/CommitLog-3-1431528750791.log
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.0/CommitLog-3-1431528750791.log b/test/data/legacy-commitlog/2.0/CommitLog-3-1431528750791.log
deleted file mode 100644
index 04314d6..0000000
Binary files a/test/data/legacy-commitlog/2.0/CommitLog-3-1431528750791.log and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.0/CommitLog-3-1431528750792.log
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.0/CommitLog-3-1431528750792.log b/test/data/legacy-commitlog/2.0/CommitLog-3-1431528750792.log
deleted file mode 100644
index a9af9e4..0000000
Binary files a/test/data/legacy-commitlog/2.0/CommitLog-3-1431528750792.log and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.0/CommitLog-3-1431528750793.log
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.0/CommitLog-3-1431528750793.log b/test/data/legacy-commitlog/2.0/CommitLog-3-1431528750793.log
deleted file mode 100644
index 3301331..0000000
Binary files a/test/data/legacy-commitlog/2.0/CommitLog-3-1431528750793.log and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.0/hash.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.0/hash.txt b/test/data/legacy-commitlog/2.0/hash.txt
deleted file mode 100644
index 4bbec02..0000000
--- a/test/data/legacy-commitlog/2.0/hash.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-cfid = 4d331c44-f018-302b-91c2-2dcf94c4bfad
-cells = 9724
-hash = -682777064

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.1/CommitLog-4-1431529069529.log
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.1/CommitLog-4-1431529069529.log b/test/data/legacy-commitlog/2.1/CommitLog-4-1431529069529.log
deleted file mode 100644
index 60064ee..0000000
Binary files a/test/data/legacy-commitlog/2.1/CommitLog-4-1431529069529.log and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.1/CommitLog-4-1431529069530.log
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.1/CommitLog-4-1431529069530.log b/test/data/legacy-commitlog/2.1/CommitLog-4-1431529069530.log
deleted file mode 100644
index fdf7071..0000000
Binary files a/test/data/legacy-commitlog/2.1/CommitLog-4-1431529069530.log and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.1/hash.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.1/hash.txt b/test/data/legacy-commitlog/2.1/hash.txt
deleted file mode 100644
index f05cf97..0000000
--- a/test/data/legacy-commitlog/2.1/hash.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-cfid = 6c622920-f980-11e4-b8a0-e7d448d5e26d
-cells = 5165
-hash = -1915888171

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.2-lz4-bitrot/CommitLog-5-1438186885380.log
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.2-lz4-bitrot/CommitLog-5-1438186885380.log b/test/data/legacy-commitlog/2.2-lz4-bitrot/CommitLog-5-1438186885380.log
deleted file mode 100644
index d248d59..0000000
Binary files a/test/data/legacy-commitlog/2.2-lz4-bitrot/CommitLog-5-1438186885380.log and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.2-lz4-bitrot/hash.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.2-lz4-bitrot/hash.txt b/test/data/legacy-commitlog/2.2-lz4-bitrot/hash.txt
deleted file mode 100644
index c4d8fe7..0000000
--- a/test/data/legacy-commitlog/2.2-lz4-bitrot/hash.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-#CommitLog bitrot test, version 2.2.0-SNAPSHOT
-#This is a copy of 2.2-lz4 with some overwritten bytes.
-#Replaying this should result in an error which can be overridden.
-cells=6051
-hash=-170208326
-cfid=dc32ce20-360d-11e5-826c-afadad37221d

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.2-lz4-bitrot2/CommitLog-5-1438186885380.log
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.2-lz4-bitrot2/CommitLog-5-1438186885380.log b/test/data/legacy-commitlog/2.2-lz4-bitrot2/CommitLog-5-1438186885380.log
deleted file mode 100644
index 083d65c..0000000
Binary files a/test/data/legacy-commitlog/2.2-lz4-bitrot2/CommitLog-5-1438186885380.log and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.2-lz4-bitrot2/hash.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.2-lz4-bitrot2/hash.txt b/test/data/legacy-commitlog/2.2-lz4-bitrot2/hash.txt
deleted file mode 100644
index c49dda0..0000000
--- a/test/data/legacy-commitlog/2.2-lz4-bitrot2/hash.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-#CommitLog upgrade test, version 2.2.0-SNAPSHOT
-#This is a copy of 2.2-lz4 with some overwritten bytes.
-#Replaying this should result in an error which can be overridden.
-cells=6037
-hash=-1312748407
-cfid=dc32ce20-360d-11e5-826c-afadad37221d

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.2-lz4-truncated/CommitLog-5-1438186885380.log
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.2-lz4-truncated/CommitLog-5-1438186885380.log b/test/data/legacy-commitlog/2.2-lz4-truncated/CommitLog-5-1438186885380.log
deleted file mode 100644
index 939d408..0000000
Binary files a/test/data/legacy-commitlog/2.2-lz4-truncated/CommitLog-5-1438186885380.log and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.2-lz4-truncated/hash.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.2-lz4-truncated/hash.txt b/test/data/legacy-commitlog/2.2-lz4-truncated/hash.txt
deleted file mode 100644
index ce7f600..0000000
--- a/test/data/legacy-commitlog/2.2-lz4-truncated/hash.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-#Truncated CommitLog test.
-#This is a copy of 2.2-lz4 with the last 50 bytes deleted.
-cells=6037
-hash=-889057729
-cfid=dc32ce20-360d-11e5-826c-afadad37221d

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.2-lz4/CommitLog-5-1438186885380.log
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.2-lz4/CommitLog-5-1438186885380.log b/test/data/legacy-commitlog/2.2-lz4/CommitLog-5-1438186885380.log
deleted file mode 100644
index b98304a..0000000
Binary files a/test/data/legacy-commitlog/2.2-lz4/CommitLog-5-1438186885380.log and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.2-lz4/CommitLog-5-1438186885381.log
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.2-lz4/CommitLog-5-1438186885381.log b/test/data/legacy-commitlog/2.2-lz4/CommitLog-5-1438186885381.log
deleted file mode 100644
index adac94f..0000000
Binary files a/test/data/legacy-commitlog/2.2-lz4/CommitLog-5-1438186885381.log and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.2-lz4/hash.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.2-lz4/hash.txt b/test/data/legacy-commitlog/2.2-lz4/hash.txt
deleted file mode 100644
index 20aa6e5..0000000
--- a/test/data/legacy-commitlog/2.2-lz4/hash.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-#CommitLog upgrade test, version 2.2.0-SNAPSHOT
-#Wed Jul 29 19:21:31 EEST 2015
-cells=6052
-hash=1274136076
-cfid=dc32ce20-360d-11e5-826c-afadad37221d

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.2-snappy/CommitLog-5-1438186915514.log
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.2-snappy/CommitLog-5-1438186915514.log b/test/data/legacy-commitlog/2.2-snappy/CommitLog-5-1438186915514.log
deleted file mode 100644
index e69dfb7..0000000
Binary files a/test/data/legacy-commitlog/2.2-snappy/CommitLog-5-1438186915514.log and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.2-snappy/CommitLog-5-1438186915515.log
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.2-snappy/CommitLog-5-1438186915515.log b/test/data/legacy-commitlog/2.2-snappy/CommitLog-5-1438186915515.log
deleted file mode 100644
index 3e06675..0000000
Binary files a/test/data/legacy-commitlog/2.2-snappy/CommitLog-5-1438186915515.log and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.2-snappy/hash.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.2-snappy/hash.txt b/test/data/legacy-commitlog/2.2-snappy/hash.txt
deleted file mode 100644
index f3dd72e..0000000
--- a/test/data/legacy-commitlog/2.2-snappy/hash.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-#CommitLog upgrade test, version 2.2.0-SNAPSHOT
-#Wed Jul 29 19:22:01 EEST 2015
-cells=6051
-hash=881633109
-cfid=ee2fe860-360d-11e5-951c-afadad37221d

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.2/CommitLog-5-1438186815314.log
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.2/CommitLog-5-1438186815314.log b/test/data/legacy-commitlog/2.2/CommitLog-5-1438186815314.log
deleted file mode 100644
index 5032519..0000000
Binary files a/test/data/legacy-commitlog/2.2/CommitLog-5-1438186815314.log and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.2/CommitLog-5-1438186815315.log
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.2/CommitLog-5-1438186815315.log b/test/data/legacy-commitlog/2.2/CommitLog-5-1438186815315.log
deleted file mode 100644
index 34a02fe..0000000
Binary files a/test/data/legacy-commitlog/2.2/CommitLog-5-1438186815315.log and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-commitlog/2.2/hash.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-commitlog/2.2/hash.txt b/test/data/legacy-commitlog/2.2/hash.txt
deleted file mode 100644
index 64f9dbb..0000000
--- a/test/data/legacy-commitlog/2.2/hash.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-#CommitLog upgrade test, version 2.2.0-SNAPSHOT
-#Wed Jul 29 19:20:21 EEST 2015
-cells=6366
-hash=-802535821
-cfid=b28a7000-360d-11e5-ae92-afadad37221d

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-CRC.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-CRC.db b/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-CRC.db
deleted file mode 100644
index 0b6dab4..0000000
Binary files a/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-CRC.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Data.db b/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Data.db
deleted file mode 100644
index 7d9407e..0000000
Binary files a/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Digest.sha1
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Digest.sha1 b/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Digest.sha1
deleted file mode 100644
index 963bd9b..0000000
--- a/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Digest.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4a9f1896a599e4b3ff5d19600901de1a0b851bc1  Keyspace1-Standard1-jb-0-Data.db
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Filter.db b/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Filter.db
deleted file mode 100644
index a3a807c..0000000
Binary files a/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Index.db b/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Index.db
deleted file mode 100644
index ee9f5fb..0000000
Binary files a/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Statistics.db b/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Statistics.db
deleted file mode 100644
index daec1c3..0000000
Binary files a/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Summary.db b/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Summary.db
deleted file mode 100644
index 1fbe040..0000000
Binary files a/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-TOC.txt b/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-TOC.txt
deleted file mode 100644
index d3aa557..0000000
--- a/test/data/legacy-sstables/jb/Keyspace1/Keyspace1-Standard1-jb-0-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-Index.db
-TOC.txt
-Summary.db
-Filter.db
-Statistics.db
-Data.db
-CRC.db
-Digest.sha1

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-CompressionInfo.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-CompressionInfo.db
deleted file mode 100644
index 6d49922..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Data.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Data.db
deleted file mode 100644
index 326498b..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Filter.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Index.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Index.db
deleted file mode 100644
index 44b89c4..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Statistics.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Statistics.db
deleted file mode 100644
index a9a404a..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Summary.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Summary.db
deleted file mode 100644
index 266c494..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-TOC.txt b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-TOC.txt
deleted file mode 100644
index abc3147..0000000
--- a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust/legacy_tables-legacy_jb_clust-jb-1-TOC.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-CompressionInfo.db
-Statistics.db
-Filter.db
-Data.db
-TOC.txt
-Index.db
-Summary.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-CompressionInfo.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-CompressionInfo.db
deleted file mode 100644
index 5eddda7..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Data.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Data.db
deleted file mode 100644
index 61ef270..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Filter.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Index.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Index.db
deleted file mode 100644
index 9e18f8e..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Statistics.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Statistics.db
deleted file mode 100644
index ab83acc..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Summary.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Summary.db
deleted file mode 100644
index 896a529..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-TOC.txt b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-TOC.txt
deleted file mode 100644
index b67360a..0000000
--- a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_compact/legacy_tables-legacy_jb_clust_compact-jb-1-TOC.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Data.db
-CompressionInfo.db
-Index.db
-Summary.db
-TOC.txt
-Statistics.db
-Filter.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-CompressionInfo.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-CompressionInfo.db
deleted file mode 100644
index fe2e257..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Data.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Data.db
deleted file mode 100644
index 12c8fdc..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Filter.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Index.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Index.db
deleted file mode 100644
index 51ddf91..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Statistics.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Statistics.db
deleted file mode 100644
index a5eff40..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Summary.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Summary.db
deleted file mode 100644
index 750a780..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-TOC.txt b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-TOC.txt
deleted file mode 100644
index abc3147..0000000
--- a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter/legacy_tables-legacy_jb_clust_counter-jb-1-TOC.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-CompressionInfo.db
-Statistics.db
-Filter.db
-Data.db
-TOC.txt
-Index.db
-Summary.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-CompressionInfo.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-CompressionInfo.db
deleted file mode 100644
index 34d459d..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Data.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Data.db
deleted file mode 100644
index b511d30..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Filter.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Index.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Index.db
deleted file mode 100644
index 10df1e8..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Statistics.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Statistics.db
deleted file mode 100644
index aa3c757..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Summary.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Summary.db
deleted file mode 100644
index 896a529..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-TOC.txt b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-TOC.txt
deleted file mode 100644
index b67360a..0000000
--- a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_clust_counter_compact/legacy_tables-legacy_jb_clust_counter_compact-jb-1-TOC.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Data.db
-CompressionInfo.db
-Index.db
-Summary.db
-TOC.txt
-Statistics.db
-Filter.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-CompressionInfo.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-CompressionInfo.db
deleted file mode 100644
index c80e64c..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Data.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Data.db
deleted file mode 100644
index 401fe93..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Filter.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Index.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Index.db
deleted file mode 100644
index f0717e0..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Statistics.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Statistics.db
deleted file mode 100644
index a2bcfaf..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Summary.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Summary.db
deleted file mode 100644
index af5e781..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-TOC.txt b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-TOC.txt
deleted file mode 100644
index abc3147..0000000
--- a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple/legacy_tables-legacy_jb_simple-jb-1-TOC.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-CompressionInfo.db
-Statistics.db
-Filter.db
-Data.db
-TOC.txt
-Index.db
-Summary.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-CompressionInfo.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-CompressionInfo.db
deleted file mode 100644
index d530b73..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Data.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Data.db
deleted file mode 100644
index c7e8586..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Filter.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Index.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Index.db
deleted file mode 100644
index d2ec218..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Statistics.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Statistics.db
deleted file mode 100644
index 792e733..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Summary.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Summary.db
deleted file mode 100644
index af5e781..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-TOC.txt b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-TOC.txt
deleted file mode 100644
index b67360a..0000000
--- a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_compact/legacy_tables-legacy_jb_simple_compact-jb-1-TOC.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Data.db
-CompressionInfo.db
-Index.db
-Summary.db
-TOC.txt
-Statistics.db
-Filter.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-CompressionInfo.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-CompressionInfo.db
deleted file mode 100644
index 9c3416e..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Data.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Data.db
deleted file mode 100644
index b72f790..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Filter.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Index.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Index.db
deleted file mode 100644
index 932936c..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Statistics.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Statistics.db
deleted file mode 100644
index 6baf1de..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Summary.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Summary.db
deleted file mode 100644
index af5e781..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-TOC.txt b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-TOC.txt
deleted file mode 100644
index abc3147..0000000
--- a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter/legacy_tables-legacy_jb_simple_counter-jb-1-TOC.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-CompressionInfo.db
-Statistics.db
-Filter.db
-Data.db
-TOC.txt
-Index.db
-Summary.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-CompressionInfo.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-CompressionInfo.db
deleted file mode 100644
index 01c5478..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Data.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Data.db
deleted file mode 100644
index f545b04..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Filter.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Index.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Index.db
deleted file mode 100644
index 48c153c..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Statistics.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Statistics.db
deleted file mode 100644
index 8657050..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Summary.db b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Summary.db
deleted file mode 100644
index af5e781..0000000
Binary files a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-TOC.txt b/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-TOC.txt
deleted file mode 100644
index b67360a..0000000
--- a/test/data/legacy-sstables/jb/legacy_tables/legacy_jb_simple_counter_compact/legacy_tables-legacy_jb_simple_counter_compact-jb-1-TOC.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Data.db
-CompressionInfo.db
-Index.db
-Summary.db
-TOC.txt
-Statistics.db
-Filter.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-CompressionInfo.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-CompressionInfo.db
deleted file mode 100644
index 69a8355..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Data.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Data.db
deleted file mode 100644
index 7acbf92..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Digest.sha1
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Digest.sha1 b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Digest.sha1
deleted file mode 100644
index fef7106..0000000
--- a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Digest.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4293822635
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Filter.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Index.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Index.db
deleted file mode 100644
index 44b89c4..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Statistics.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Statistics.db
deleted file mode 100644
index 5f07da5..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Summary.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Summary.db
deleted file mode 100644
index 35b5e22..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-TOC.txt b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-TOC.txt
deleted file mode 100644
index 7be41d8..0000000
--- a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust/legacy_tables-legacy_ka_clust-ka-1-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-Index.db
-Digest.sha1
-CompressionInfo.db
-Data.db
-Statistics.db
-Summary.db
-TOC.txt
-Filter.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-CompressionInfo.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-CompressionInfo.db
deleted file mode 100644
index 654094e..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Data.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Data.db
deleted file mode 100644
index 4c87e07..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Digest.sha1
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Digest.sha1 b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Digest.sha1
deleted file mode 100644
index 4690757..0000000
--- a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Digest.sha1
+++ /dev/null
@@ -1 +0,0 @@
-1331331706
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Filter.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Index.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Index.db
deleted file mode 100644
index 9e18f8e..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Statistics.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Statistics.db
deleted file mode 100644
index ab55258..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Summary.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Summary.db
deleted file mode 100644
index 774cbd1..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-Summary.db and /dev/null differ


[08/11] cassandra git commit: Remove pre-3.0 compatibility code for 4.0

Posted by sl...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/filter/RowFilter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/filter/RowFilter.java b/src/java/org/apache/cassandra/db/filter/RowFilter.java
index 4c0608f..5baf783 100644
--- a/src/java/org/apache/cassandra/db/filter/RowFilter.java
+++ b/src/java/org/apache/cassandra/db/filter/RowFilter.java
@@ -509,16 +509,12 @@ public abstract class RowFilter implements Iterable<RowFilter.Expression>
         {
             public void serialize(Expression expression, DataOutputPlus out, int version) throws IOException
             {
-                if (version >= MessagingService.VERSION_30)
-                    out.writeByte(expression.kind().ordinal());
+                out.writeByte(expression.kind().ordinal());
 
                // Custom expressions include neither a column nor an operator, but all
-                // other expressions do. Also, custom expressions are 3.0+ only, so
-                // the column & operator will always be the first things written for
-                // any pre-3.0 version
+                // other expressions do.
                 if (expression.kind() == Kind.CUSTOM)
                 {
-                    assert version >= MessagingService.VERSION_30;
                     IndexMetadata.serializer.serialize(((CustomExpression)expression).targetIndex, out, version);
                     ByteBufferUtil.writeWithShortLength(expression.value, out);
                     return;
@@ -526,7 +522,6 @@ public abstract class RowFilter implements Iterable<RowFilter.Expression>
 
                 if (expression.kind() == Kind.USER)
                 {
-                    assert version >= MessagingService.VERSION_30;
                     UserExpression.serialize((UserExpression)expression, out, version);
                     return;
                 }
@@ -541,15 +536,8 @@ public abstract class RowFilter implements Iterable<RowFilter.Expression>
                         break;
                     case MAP_EQUALITY:
                         MapEqualityExpression mexpr = (MapEqualityExpression)expression;
-                        if (version < MessagingService.VERSION_30)
-                        {
-                            ByteBufferUtil.writeWithShortLength(mexpr.getIndexValue(), out);
-                        }
-                        else
-                        {
-                            ByteBufferUtil.writeWithShortLength(mexpr.key, out);
-                            ByteBufferUtil.writeWithShortLength(mexpr.value, out);
-                        }
+                        ByteBufferUtil.writeWithShortLength(mexpr.key, out);
+                        ByteBufferUtil.writeWithShortLength(mexpr.value, out);
                         break;
                     case THRIFT_DYN_EXPR:
                         ByteBufferUtil.writeWithShortLength(((ThriftExpression)expression).value, out);
@@ -559,62 +547,33 @@ public abstract class RowFilter implements Iterable<RowFilter.Expression>
 
             public Expression deserialize(DataInputPlus in, int version, CFMetaData metadata) throws IOException
             {
-                Kind kind = null;
-                ByteBuffer name;
-                Operator operator;
-                ColumnDefinition column;
+                Kind kind = Kind.values()[in.readByte()];
 
-                if (version >= MessagingService.VERSION_30)
+                // custom expressions (3.0+ only) do not contain a column or operator, only a value
+                if (kind == Kind.CUSTOM)
                 {
-                    kind = Kind.values()[in.readByte()];
-                    // custom expressions (3.0+ only) do not contain a column or operator, only a value
-                    if (kind == Kind.CUSTOM)
-                    {
-                        return new CustomExpression(metadata,
-                                                    IndexMetadata.serializer.deserialize(in, version, metadata),
-                                                    ByteBufferUtil.readWithShortLength(in));
-                    }
-
-                    if (kind == Kind.USER)
-                    {
-                        return UserExpression.deserialize(in, version, metadata);
-                    }
+                    return new CustomExpression(metadata,
+                            IndexMetadata.serializer.deserialize(in, version, metadata),
+                            ByteBufferUtil.readWithShortLength(in));
                 }
 
-                name = ByteBufferUtil.readWithShortLength(in);
-                operator = Operator.readFrom(in);
-                column = metadata.getColumnDefinition(name);
+                if (kind == Kind.USER)
+                    return UserExpression.deserialize(in, version, metadata);
+
+                ByteBuffer name = ByteBufferUtil.readWithShortLength(in);
+                Operator operator = Operator.readFrom(in);
+                ColumnDefinition column = metadata.getColumnDefinition(name);
+
                 if (!metadata.isCompactTable() && column == null)
                     throw new RuntimeException("Unknown (or dropped) column " + UTF8Type.instance.getString(name) + " during deserialization");
 
-                if (version < MessagingService.VERSION_30)
-                {
-                    if (column == null)
-                        kind = Kind.THRIFT_DYN_EXPR;
-                    else if (column.type instanceof MapType && operator == Operator.EQ)
-                        kind = Kind.MAP_EQUALITY;
-                    else
-                        kind = Kind.SIMPLE;
-                }
-
-                assert kind != null;
                 switch (kind)
                 {
                     case SIMPLE:
                         return new SimpleExpression(column, operator, ByteBufferUtil.readWithShortLength(in));
                     case MAP_EQUALITY:
-                        ByteBuffer key, value;
-                        if (version < MessagingService.VERSION_30)
-                        {
-                            ByteBuffer composite = ByteBufferUtil.readWithShortLength(in);
-                            key = CompositeType.extractComponent(composite, 0);
-                            value = CompositeType.extractComponent(composite, 0);
-                        }
-                        else
-                        {
-                            key = ByteBufferUtil.readWithShortLength(in);
-                            value = ByteBufferUtil.readWithShortLength(in);
-                        }
+                        ByteBuffer key = ByteBufferUtil.readWithShortLength(in);
+                        ByteBuffer value = ByteBufferUtil.readWithShortLength(in);
                         return new MapEqualityExpression(column, key, operator, value);
                     case THRIFT_DYN_EXPR:
                         return new ThriftExpression(metadata, name, operator, ByteBufferUtil.readWithShortLength(in));
@@ -622,16 +581,12 @@ public abstract class RowFilter implements Iterable<RowFilter.Expression>
                 throw new AssertionError();
             }
 
-
             public long serializedSize(Expression expression, int version)
             {
-                // version 3.0+ includes a byte for Kind
-                long size = version >= MessagingService.VERSION_30 ? 1 : 0;
+                long size = 1; // kind byte
 
                // Custom expressions include neither a column nor an operator, but all
-                // other expressions do. Also, custom expressions are 3.0+ only, so
-                // the column & operator will always be the first things written for
-                // any pre-3.0 version
+                // other expressions do.
                 if (expression.kind() != Kind.CUSTOM && expression.kind() != Kind.USER)
                     size += ByteBufferUtil.serializedSizeWithShortLength(expression.column().name.bytes)
                             + expression.operator.serializedSize();
@@ -643,23 +598,19 @@ public abstract class RowFilter implements Iterable<RowFilter.Expression>
                         break;
                     case MAP_EQUALITY:
                         MapEqualityExpression mexpr = (MapEqualityExpression)expression;
-                        if (version < MessagingService.VERSION_30)
-                            size += ByteBufferUtil.serializedSizeWithShortLength(mexpr.getIndexValue());
-                        else
-                            size += ByteBufferUtil.serializedSizeWithShortLength(mexpr.key)
-                                  + ByteBufferUtil.serializedSizeWithShortLength(mexpr.value);
+                        size += ByteBufferUtil.serializedSizeWithShortLength(mexpr.key)
+                              + ByteBufferUtil.serializedSizeWithShortLength(mexpr.value);
                         break;
                     case THRIFT_DYN_EXPR:
                         size += ByteBufferUtil.serializedSizeWithShortLength(((ThriftExpression)expression).value);
                         break;
                     case CUSTOM:
-                        if (version >= MessagingService.VERSION_30)
-                            size += IndexMetadata.serializer.serializedSize(((CustomExpression)expression).targetIndex, version)
-                                   + ByteBufferUtil.serializedSizeWithShortLength(expression.value);
+                        size += IndexMetadata.serializer.serializedSize(((CustomExpression)expression).targetIndex, version)
+                               + ByteBufferUtil.serializedSizeWithShortLength(expression.value);
                         break;
                     case USER:
-                        if (version >= MessagingService.VERSION_30)
-                            size += UserExpression.serializedSize((UserExpression)expression, version);
+                        size += UserExpression.serializedSize((UserExpression)expression, version);
+                        break;
                 }
                 return size;
             }

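A note for readers following the serializer rewrite above: with the pre-3.0 branches gone, every expression now begins with a one-byte kind ordinal; for every kind except CUSTOM and USER this is followed by the column name (short-length-prefixed) and the operator, then the kind-specific payload. Below is a minimal standalone sketch of that layout for a SIMPLE expression, using plain JDK streams rather than Cassandra's DataOutputPlus; the ordinal value and the operator width are assumptions of the sketch, not Cassandra's constants.

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    public class SimpleExpressionLayout
    {
        // writeWithShortLength equivalent: unsigned 16-bit length prefix, then the bytes
        static void writeWithShortLength(DataOutputStream out, byte[] bytes) throws IOException
        {
            out.writeShort(bytes.length);
            out.write(bytes);
        }

        public static void main(String[] args) throws IOException
        {
            ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(buffer);

            out.writeByte(0);                                                  // kind ordinal (SIMPLE assumed to be 0)
            writeWithShortLength(out, "col".getBytes(StandardCharsets.UTF_8)); // column name
            out.writeInt(0);                                                   // operator code (int width assumed)
            writeWithShortLength(out, "val".getBytes(StandardCharsets.UTF_8)); // value

            // 1 (kind) + 2 + 3 (name) + 4 (operator) + 2 + 3 (value) = 15 bytes
            System.out.println("serialized size: " + buffer.size());
        }
    }
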
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/partitions/PartitionUpdate.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/partitions/PartitionUpdate.java b/src/java/org/apache/cassandra/db/partitions/PartitionUpdate.java
index b95a310..1ed961f 100644
--- a/src/java/org/apache/cassandra/db/partitions/PartitionUpdate.java
+++ b/src/java/org/apache/cassandra/db/partitions/PartitionUpdate.java
@@ -238,12 +238,10 @@ public class PartitionUpdate extends AbstractBTreePartition
      *
      * @param bytes the byte buffer that contains the serialized update.
      * @param version the version with which the update is serialized.
-     * @param key the partition key for the update. This is only used if {@code version &lt 3.0}
-     * and can be {@code null} otherwise.
      *
      * @return the deserialized update or {@code null} if {@code bytes == null}.
      */
-    public static PartitionUpdate fromBytes(ByteBuffer bytes, int version, DecoratedKey key)
+    public static PartitionUpdate fromBytes(ByteBuffer bytes, int version)
     {
         if (bytes == null)
             return null;
@@ -252,8 +250,7 @@ public class PartitionUpdate extends AbstractBTreePartition
         {
             return serializer.deserialize(new DataInputBuffer(bytes, true),
                                           version,
-                                          SerializationHelper.Flag.LOCAL,
-                                          version < MessagingService.VERSION_30 ? key : null);
+                                          SerializationHelper.Flag.LOCAL);
         }
         catch (IOException e)
         {
@@ -780,47 +777,12 @@ public class PartitionUpdate extends AbstractBTreePartition
             {
                 assert !iter.isReverseOrder();
 
-                if (version < MessagingService.VERSION_30)
-                {
-                    LegacyLayout.serializeAsLegacyPartition(null, iter, out, version);
-                }
-                else
-                {
-                    CFMetaData.serializer.serialize(update.metadata(), out, version);
-                    UnfilteredRowIteratorSerializer.serializer.serialize(iter, null, out, version, update.rowCount());
-                }
+                CFMetaData.serializer.serialize(update.metadata(), out, version);
+                UnfilteredRowIteratorSerializer.serializer.serialize(iter, null, out, version, update.rowCount());
             }
         }
 
-        public PartitionUpdate deserialize(DataInputPlus in, int version, SerializationHelper.Flag flag, ByteBuffer key) throws IOException
-        {
-            if (version >= MessagingService.VERSION_30)
-            {
-                assert key == null; // key is only there for the old format
-                return deserialize30(in, version, flag);
-            }
-            else
-            {
-                assert key != null;
-                return deserializePre30(in, version, flag, key);
-            }
-        }
-
-        // Used to share same decorated key between updates.
-        public PartitionUpdate deserialize(DataInputPlus in, int version, SerializationHelper.Flag flag, DecoratedKey key) throws IOException
-        {
-            if (version >= MessagingService.VERSION_30)
-            {
-                return deserialize30(in, version, flag);
-            }
-            else
-            {
-                assert key != null;
-                return deserializePre30(in, version, flag, key.getKey());
-            }
-        }
-
-        private static PartitionUpdate deserialize30(DataInputPlus in, int version, SerializationHelper.Flag flag) throws IOException
+        public PartitionUpdate deserialize(DataInputPlus in, int version, SerializationHelper.Flag flag) throws IOException
         {
             CFMetaData metadata = CFMetaData.serializer.deserialize(in, version);
             UnfilteredRowIteratorSerializer.Header header = UnfilteredRowIteratorSerializer.serializer.deserializeHeader(metadata, null, in, version, flag);
@@ -854,22 +816,10 @@ public class PartitionUpdate extends AbstractBTreePartition
                                        false);
         }
 
-        private static PartitionUpdate deserializePre30(DataInputPlus in, int version, SerializationHelper.Flag flag, ByteBuffer key) throws IOException
-        {
-            try (UnfilteredRowIterator iterator = LegacyLayout.deserializeLegacyPartition(in, version, flag, key))
-            {
-                assert iterator != null; // This is only used in mutation, and mutation have never allowed "null" column families
-                return PartitionUpdate.fromIterator(iterator, ColumnFilter.all(iterator.metadata()));
-            }
-        }
-
         public long serializedSize(PartitionUpdate update, int version)
         {
             try (UnfilteredRowIterator iter = update.unfilteredIterator())
             {
-                if (version < MessagingService.VERSION_30)
-                    return LegacyLayout.serializedSizeAsLegacyPartition(null, iter, version);
-
                 return CFMetaData.serializer.serializedSize(update.metadata(), version)
                      + UnfilteredRowIteratorSerializer.serializer.serializedSize(iter, null, version, update.rowCount());
             }

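With the legacy paths gone, the serialized form of an update is fully self-describing: it starts with the CFMetaData reference, so callers no longer have to thread a partition key through deserialization. A sketch of the simplified round trip inside the Cassandra tree, assuming toBytes keeps its existing (update, version) signature:

    import java.nio.ByteBuffer;
    import org.apache.cassandra.db.partitions.PartitionUpdate;
    import org.apache.cassandra.net.MessagingService;

    // Sketch: serialize and deserialize an update with no key parameter.
    static PartitionUpdate roundTrip(PartitionUpdate update)
    {
        ByteBuffer bytes = PartitionUpdate.toBytes(update, MessagingService.current_version);
        return PartitionUpdate.fromBytes(bytes, MessagingService.current_version);
    }
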
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/partitions/UnfilteredPartitionIterators.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/partitions/UnfilteredPartitionIterators.java b/src/java/org/apache/cassandra/db/partitions/UnfilteredPartitionIterators.java
index 852d95e..bcc8d4d 100644
--- a/src/java/org/apache/cassandra/db/partitions/UnfilteredPartitionIterators.java
+++ b/src/java/org/apache/cassandra/db/partitions/UnfilteredPartitionIterators.java
@@ -252,13 +252,11 @@ public abstract class UnfilteredPartitionIterators
     /**
      * Digests the provided iterator.
      *
-     * @param command the command that has yield {@code iterator}. This can be null if {@code version >= MessagingService.VERSION_30}
-     * as this is only used when producing digest to be sent to legacy nodes.
      * @param iterator the iterator to digest.
      * @param digest the {@code MessageDigest} to use for the digest.
      * @param version the messaging protocol to use when producing the digest.
      */
-    public static void digest(ReadCommand command, UnfilteredPartitionIterator iterator, MessageDigest digest, int version)
+    public static void digest(UnfilteredPartitionIterator iterator, MessageDigest digest, int version)
     {
         try (UnfilteredPartitionIterator iter = iterator)
         {
@@ -266,7 +264,7 @@ public abstract class UnfilteredPartitionIterators
             {
                 try (UnfilteredRowIterator partition = iter.next())
                 {
-                    UnfilteredRowIterators.digest(command, partition, digest, version);
+                    UnfilteredRowIterators.digest(partition, digest, version);
                 }
             }
         }
@@ -303,8 +301,6 @@ public abstract class UnfilteredPartitionIterators
     {
         public void serialize(UnfilteredPartitionIterator iter, ColumnFilter selection, DataOutputPlus out, int version) throws IOException
         {
-            assert version >= MessagingService.VERSION_30; // We handle backward compatibility directy in ReadResponse.LegacyRangeSliceReplySerializer
-
             out.writeBoolean(iter.isForThrift());
             while (iter.hasNext())
             {
@@ -319,7 +315,6 @@ public abstract class UnfilteredPartitionIterators
 
         public UnfilteredPartitionIterator deserialize(final DataInputPlus in, final int version, final CFMetaData metadata, final ColumnFilter selection, final SerializationHelper.Flag flag) throws IOException
         {
-            assert version >= MessagingService.VERSION_30; // We handle backward compatibility directy in ReadResponse.LegacyRangeSliceReplySerializer
             final boolean isForThrift = in.readBoolean();
 
             return new AbstractUnfilteredPartitionIterator()

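Since digests no longer have to be re-serialized into the legacy layout for pre-3.0 replicas, the ReadCommand parameter disappears and the helper hashes the iterator contents directly. A sketch of computing a partition-level digest under the new signature; the MD5 choice here is purely illustrative:

    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
    import org.apache.cassandra.db.partitions.UnfilteredPartitionIterators;
    import org.apache.cassandra.net.MessagingService;

    // Sketch: digest every partition produced by the iterator.
    static byte[] digestOf(UnfilteredPartitionIterator iterator) throws NoSuchAlgorithmException
    {
        MessageDigest digest = MessageDigest.getInstance("MD5"); // algorithm is illustrative
        UnfilteredPartitionIterators.digest(iterator, digest, MessagingService.current_version);
        return digest.digest();
    }
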
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/rows/UnfilteredRowIteratorWithLowerBound.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/rows/UnfilteredRowIteratorWithLowerBound.java b/src/java/org/apache/cassandra/db/rows/UnfilteredRowIteratorWithLowerBound.java
index 14730ac..30a0a37 100644
--- a/src/java/org/apache/cassandra/db/rows/UnfilteredRowIteratorWithLowerBound.java
+++ b/src/java/org/apache/cassandra/db/rows/UnfilteredRowIteratorWithLowerBound.java
@@ -211,12 +211,11 @@ public class UnfilteredRowIteratorWithLowerBound extends LazilyInitializedUnfilt
 
     /**
      * @return true if we can use the clustering values in the stats of the sstable:
-     * - we need the latest stats file format (or else the clustering values create clusterings with the wrong size)
-     * - we cannot create tombstone bounds from these values only and so we rule out sstables with tombstones
+     * we cannot create tombstone bounds from these values alone, so we rule out sstables with tombstones
      */
     private boolean canUseMetadataLowerBound()
     {
-        return !sstable.hasTombstones() && sstable.descriptor.version.hasNewStatsFile();
+        return !sstable.hasTombstones();
     }
 
     /**

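The guard above shrinks because unsupported pre-3.0 stats formats can no longer reach this code; the only remaining disqualifier is tombstones, whose bounds cannot be reconstructed from the clustering min/max values in the stats metadata. Schematically, the caller's choice looks like this (boundFromStats and boundFromFirstRow are hypothetical helpers, not Cassandra's API):

    // Prefer the cheap metadata-derived lower bound; otherwise pay for a read.
    ClusteringBound lowerBound = canUseMetadataLowerBound()
                               ? boundFromStats(sstable)     // sstable stats only, no data-file I/O
                               : boundFromFirstRow(sstable); // requires reading the data file
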
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/rows/UnfilteredRowIterators.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/rows/UnfilteredRowIterators.java b/src/java/org/apache/cassandra/db/rows/UnfilteredRowIterators.java
index 46447ec..004783e 100644
--- a/src/java/org/apache/cassandra/db/rows/UnfilteredRowIterators.java
+++ b/src/java/org/apache/cassandra/db/rows/UnfilteredRowIterators.java
@@ -143,20 +143,12 @@ public abstract class UnfilteredRowIterators
     /**
      * Digests the partition represented by the provided iterator.
      *
-     * @param command the command that has yield {@code iterator}. This can be null if {@code version >= MessagingService.VERSION_30}
-     * as this is only used when producing digest to be sent to legacy nodes.
      * @param iterator the iterator to digest.
      * @param digest the {@code MessageDigest} to use for the digest.
      * @param version the messaging protocol to use when producing the digest.
      */
-    public static void digest(ReadCommand command, UnfilteredRowIterator iterator, MessageDigest digest, int version)
+    public static void digest(UnfilteredRowIterator iterator, MessageDigest digest, int version)
     {
-        if (version < MessagingService.VERSION_30)
-        {
-            LegacyLayout.fromUnfilteredRowIterator(command, iterator).digest(iterator.metadata(), digest);
-            return;
-        }
-
         digest.update(iterator.partitionKey().getKey().duplicate());
         iterator.partitionLevelDeletion().digest(digest);
         iterator.columns().regulars.digest(digest);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/dht/AbstractBounds.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/dht/AbstractBounds.java b/src/java/org/apache/cassandra/dht/AbstractBounds.java
index 298c316..7a603b0 100644
--- a/src/java/org/apache/cassandra/dht/AbstractBounds.java
+++ b/src/java/org/apache/cassandra/dht/AbstractBounds.java
@@ -184,6 +184,9 @@ public abstract class AbstractBounds<T extends RingPosition<T>> implements Seria
              * The first int tells us if it's a range or bounds (depending on the value) _and_ if it's tokens or keys (depending on the
              * sign). We use negative kind for keys so as to preserve the serialization of token from older version.
              */
+            // !WARNING! While we no longer support the pre-3.0 messaging protocol, we still serialize the token range in the
+            // system table (see SystemKeyspace.rangeToBytes) using the old/pre-3.0 format, and until we deal with that
+            // problem, we have to preserve this code.
             if (version < MessagingService.VERSION_30)
                 out.writeInt(kindInt(range));
             else
@@ -195,6 +198,7 @@ public abstract class AbstractBounds<T extends RingPosition<T>> implements Seria
         public AbstractBounds<T> deserialize(DataInput in, IPartitioner p, int version) throws IOException
         {
             boolean isToken, startInclusive, endInclusive;
+            // !WARNING! See the serialize method above for why we still need this condition.
             if (version < MessagingService.VERSION_30)
             {
                 int kind = in.readInt();
@@ -226,6 +230,7 @@ public abstract class AbstractBounds<T extends RingPosition<T>> implements Seria
 
         public long serializedSize(AbstractBounds<T> ab, int version)
         {
+            // !WARNING! See the serialize method above for why we still need this condition.
             int size = version < MessagingService.VERSION_30
                      ? TypeSizes.sizeof(kindInt(ab))
                      : 1;

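For context on the condition being preserved: as the comment in the hunk explains, the legacy encoding packs two facts into one int, range-vs-bounds in its magnitude and tokens-vs-keys in its sign (negative kinds mean keys). A standalone illustration of that scheme, with kind values invented for the sketch rather than taken from Cassandra:

    import java.io.*;

    public class LegacyBoundsKind
    {
        static final int RANGE = 1, BOUNDS = 2; // invented values for illustration

        static int encode(int kind, boolean isKeys)
        {
            return isKeys ? -kind : kind; // the sign carries tokens-vs-keys
        }

        static String decode(int kindInt)
        {
            boolean isKeys = kindInt < 0;
            int kind = Math.abs(kindInt);
            return (isKeys ? "key " : "token ") + (kind == RANGE ? "range" : "bounds");
        }

        public static void main(String[] args) throws IOException
        {
            ByteArrayOutputStream buf = new ByteArrayOutputStream();
            new DataOutputStream(buf).writeInt(encode(RANGE, true)); // legacy path: a full int
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
            System.out.println(decode(in.readInt()));                // prints "key range"
        }
    }
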
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/gms/Gossiper.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/gms/Gossiper.java b/src/java/org/apache/cassandra/gms/Gossiper.java
index 212f88c..c4c3872 100644
--- a/src/java/org/apache/cassandra/gms/Gossiper.java
+++ b/src/java/org/apache/cassandra/gms/Gossiper.java
@@ -979,12 +979,6 @@ public class Gossiper implements IFailureDetectionEventListener, GossiperMBean
 
     private void markAlive(final InetAddress addr, final EndpointState localState)
     {
-        if (MessagingService.instance().getVersion(addr) < MessagingService.VERSION_20)
-        {
-            realMarkAlive(addr, localState);
-            return;
-        }
-
         localState.markDead();
 
         MessageOut<EchoMessage> echoMessage = new MessageOut<EchoMessage>(MessagingService.Verb.ECHO, EchoMessage.instance, EchoMessage.serializer);

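With the VERSION_20 shortcut removed, every endpoint now goes through the echo handshake: markAlive marks the peer dead locally, sends an ECHO message, and only the response flips it back to alive. The remainder of the method (just past this hunk) completes the handshake along these lines, sketched from the surrounding code rather than quoted from it; addr, localState and echoMessage are the locals visible in the hunk above:

    // Sketch: mark alive only once the peer acknowledges the echo.
    IAsyncCallback echoHandler = new IAsyncCallback()
    {
        public boolean isLatencyForSnitch() { return false; }
        public void response(MessageIn msg) { realMarkAlive(addr, localState); }
    };
    MessagingService.instance().sendRR(echoMessage, addr, echoHandler);
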
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/hints/LegacyHintsMigrator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hints/LegacyHintsMigrator.java b/src/java/org/apache/cassandra/hints/LegacyHintsMigrator.java
deleted file mode 100644
index 50d8b6e..0000000
--- a/src/java/org/apache/cassandra/hints/LegacyHintsMigrator.java
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.hints;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.*;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.cassandra.config.SchemaConstants;
-import org.apache.cassandra.cql3.QueryProcessor;
-import org.apache.cassandra.cql3.UntypedResultSet;
-import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.compaction.CompactionManager;
-import org.apache.cassandra.db.marshal.UUIDType;
-import org.apache.cassandra.db.partitions.PartitionUpdate;
-import org.apache.cassandra.io.FSWriteError;
-import org.apache.cassandra.io.sstable.Descriptor;
-import org.apache.cassandra.io.util.DataInputBuffer;
-import org.apache.cassandra.io.util.FileUtils;
-import org.apache.cassandra.serializers.MarshalException;
-import org.apache.cassandra.utils.FBUtilities;
-
-/**
- * A migrator that goes through the legacy system.hints table and writes all the hints to the new hints storage format.
- */
-@SuppressWarnings("deprecation")
-public final class LegacyHintsMigrator
-{
-    private static final Logger logger = LoggerFactory.getLogger(LegacyHintsMigrator.class);
-
-    private final File hintsDirectory;
-    private final long maxHintsFileSize;
-
-    private final ColumnFamilyStore legacyHintsTable;
-    private final int pageSize;
-
-    public LegacyHintsMigrator(File hintsDirectory, long maxHintsFileSize)
-    {
-        this.hintsDirectory = hintsDirectory;
-        this.maxHintsFileSize = maxHintsFileSize;
-
-        legacyHintsTable = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.LEGACY_HINTS);
-        pageSize = calculatePageSize(legacyHintsTable);
-    }
-
-    // read fewer columns (mutations) per page if they are very large
-    private static int calculatePageSize(ColumnFamilyStore legacyHintsTable)
-    {
-        int size = 128;
-
-        int meanCellCount = legacyHintsTable.getMeanColumns();
-        double meanPartitionSize = legacyHintsTable.getMeanPartitionSize();
-
-        if (meanCellCount != 0 && meanPartitionSize != 0)
-        {
-            int avgHintSize = (int) meanPartitionSize / meanCellCount;
-            size = Math.max(2, Math.min(size, (512 << 10) / avgHintSize));
-        }
-
-        return size;
-    }
-
-    public void migrate()
-    {
-        // nothing to migrate
-        if (legacyHintsTable.isEmpty())
-            return;
-        logger.info("Migrating legacy hints to new storage");
-
-        // major-compact all of the existing sstables to get rid of the tombstones + expired hints
-        logger.info("Forcing a major compaction of {}.{} table", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_HINTS);
-        compactLegacyHints();
-
-        // paginate over legacy hints and write them to the new storage
-        logger.info("Writing legacy hints to the new storage");
-        migrateLegacyHints();
-
-        // truncate the legacy hints table
-        logger.info("Truncating {}.{} table", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_HINTS);
-        legacyHintsTable.truncateBlocking();
-    }
-
-    private void compactLegacyHints()
-    {
-        Collection<Descriptor> descriptors = new ArrayList<>();
-        legacyHintsTable.getTracker().getUncompacting().forEach(sstable -> descriptors.add(sstable.descriptor));
-        if (!descriptors.isEmpty())
-            forceCompaction(descriptors);
-    }
-
-    private void forceCompaction(Collection<Descriptor> descriptors)
-    {
-        try
-        {
-            CompactionManager.instance.submitUserDefined(legacyHintsTable, descriptors, FBUtilities.nowInSeconds()).get();
-        }
-        catch (InterruptedException | ExecutionException e)
-        {
-            throw new RuntimeException(e);
-        }
-    }
-
-    private void migrateLegacyHints()
-    {
-        ByteBuffer buffer = ByteBuffer.allocateDirect(256 * 1024);
-        String query = String.format("SELECT DISTINCT target_id FROM %s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_HINTS);
-        //noinspection ConstantConditions
-        QueryProcessor.executeInternal(query).forEach(row -> migrateLegacyHints(row.getUUID("target_id"), buffer));
-        FileUtils.clean(buffer);
-    }
-
-    private void migrateLegacyHints(UUID hostId, ByteBuffer buffer)
-    {
-        String query = String.format("SELECT target_id, hint_id, message_version, mutation, ttl(mutation) AS ttl, writeTime(mutation) AS write_time " +
-                                     "FROM %s.%s " +
-                                     "WHERE target_id = ?",
-                                     SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                                     SystemKeyspace.LEGACY_HINTS);
-
-        // read all the old hints (paged iterator), write them in the new format
-        UntypedResultSet rows = QueryProcessor.executeInternalWithPaging(query, pageSize, hostId);
-        migrateLegacyHints(hostId, rows, buffer);
-
-        // delete the whole partition in the legacy table; we would truncate the whole table afterwards, but this allows
-        // to not lose progress in case of a terminated conversion
-        deleteLegacyHintsPartition(hostId);
-    }
-
-    private void migrateLegacyHints(UUID hostId, UntypedResultSet rows, ByteBuffer buffer)
-    {
-        migrateLegacyHints(hostId, rows.iterator(), buffer);
-    }
-
-    private void migrateLegacyHints(UUID hostId, Iterator<UntypedResultSet.Row> iterator, ByteBuffer buffer)
-    {
-        do
-        {
-            migrateLegacyHintsInternal(hostId, iterator, buffer);
-            // if there are hints that didn't fit in the previous file, keep calling the method to write to a new
-            // file until we get everything written.
-        }
-        while (iterator.hasNext());
-    }
-
-    private void migrateLegacyHintsInternal(UUID hostId, Iterator<UntypedResultSet.Row> iterator, ByteBuffer buffer)
-    {
-        HintsDescriptor descriptor = new HintsDescriptor(hostId, System.currentTimeMillis());
-
-        try (HintsWriter writer = HintsWriter.create(hintsDirectory, descriptor))
-        {
-            try (HintsWriter.Session session = writer.newSession(buffer))
-            {
-                while (iterator.hasNext())
-                {
-                    Hint hint = convertLegacyHint(iterator.next());
-                    if (hint != null)
-                        session.append(hint);
-
-                    if (session.position() >= maxHintsFileSize)
-                        break;
-                }
-            }
-        }
-        catch (IOException e)
-        {
-            throw new FSWriteError(e, descriptor.fileName());
-        }
-    }
-
-    private static Hint convertLegacyHint(UntypedResultSet.Row row)
-    {
-        Mutation mutation = deserializeLegacyMutation(row);
-        if (mutation == null)
-            return null;
-
-        long creationTime = row.getLong("write_time"); // milliseconds, not micros, for the hints table
-        int expirationTime = FBUtilities.nowInSeconds() + row.getInt("ttl");
-        int originalGCGS = expirationTime - (int) TimeUnit.MILLISECONDS.toSeconds(creationTime);
-
-        int gcgs = Math.min(originalGCGS, mutation.smallestGCGS());
-
-        return Hint.create(mutation, creationTime, gcgs);
-    }
-
-    private static Mutation deserializeLegacyMutation(UntypedResultSet.Row row)
-    {
-        try (DataInputBuffer dib = new DataInputBuffer(row.getBlob("mutation"), true))
-        {
-            Mutation mutation = Mutation.serializer.deserialize(dib,
-                                                                row.getInt("message_version"));
-            mutation.getPartitionUpdates().forEach(PartitionUpdate::validate);
-            return mutation;
-        }
-        catch (IOException e)
-        {
-            logger.error("Failed to migrate a hint for {} from legacy {}.{} table",
-                         row.getUUID("target_id"),
-                         SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                         SystemKeyspace.LEGACY_HINTS,
-                         e);
-            return null;
-        }
-        catch (MarshalException e)
-        {
-            logger.warn("Failed to validate a hint for {} from legacy {}.{} table - skipping",
-                        row.getUUID("target_id"),
-                        SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                        SystemKeyspace.LEGACY_HINTS,
-                        e);
-            return null;
-        }
-    }
-
-    private static void deleteLegacyHintsPartition(UUID hostId)
-    {
-        // intentionally use millis, like the rest of the legacy implementation did, just in case
-        Mutation mutation = new Mutation(PartitionUpdate.fullPartitionDelete(SystemKeyspace.LegacyHints,
-                                                                             UUIDType.instance.decompose(hostId),
-                                                                             System.currentTimeMillis(),
-                                                                             FBUtilities.nowInSeconds()));
-        mutation.applyUnsafe();
-    }
-}
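
The deleted migrator's page-size heuristic deserves a worked example: pages shrink as the average hint grows, targeting roughly 512KB per page and clamping to [2, 128] rows. With a 1MB mean partition holding 64 hints:

    // Worked check of the deleted calculatePageSize heuristic:
    int avgHintSize = (1024 * 1024) / 64;                             // 16384 bytes per hint
    int size = Math.max(2, Math.min(128, (512 << 10) / avgHintSize)); // 524288 / 16384 = 32 rows per page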

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/ForwardingVersionedSerializer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/ForwardingVersionedSerializer.java b/src/java/org/apache/cassandra/io/ForwardingVersionedSerializer.java
deleted file mode 100644
index 64f91d7..0000000
--- a/src/java/org/apache/cassandra/io/ForwardingVersionedSerializer.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.io;
-
-import java.io.IOException;
-
-import org.apache.cassandra.io.util.DataInputPlus;
-import org.apache.cassandra.io.util.DataOutputPlus;
-
-/**
- * A serializer which forwards all its method calls to another serializer. Subclasses should override one or more
- * methods to modify the behavior of the backing serializer as desired per the decorator pattern.
- */
-public abstract class ForwardingVersionedSerializer<T> implements IVersionedSerializer<T>
-{
-    protected ForwardingVersionedSerializer()
-    {
-    }
-
-    /**
-     * Returns the backing delegate instance that methods are forwarded to.
-     *
-     * @param version the server version
-     * @return the backing delegate instance that methods are forwarded to.
-     */
-    protected abstract IVersionedSerializer<T> delegate(int version);
-
-    public void serialize(T t, DataOutputPlus out, int version) throws IOException
-    {
-        delegate(version).serialize(t, out, version);
-    }
-
-    public T deserialize(DataInputPlus in, int version) throws IOException
-    {
-        return delegate(version).deserialize(in, version);
-    }
-
-    public long serializedSize(T t, int version)
-    {
-        return delegate(version).serializedSize(t, version);
-    }
-}
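
For reference, a hypothetical, self-contained illustration of the decorator pattern the deleted class implemented: route each call to a version-appropriate backing serializer (simplified interface, not Cassandra's actual one):

    interface VersionedSerializer<T> { String serialize(T t, int version); }

    abstract class ForwardingSketch<T> implements VersionedSerializer<T>
    {
        protected abstract VersionedSerializer<T> delegate(int version);
        public String serialize(T t, int version) { return delegate(version).serialize(t, version); }
    }

    final class PerVersionSerializer extends ForwardingSketch<Integer>
    {
        private final VersionedSerializer<Integer> legacy  = (t, v) -> "legacy:" + t;
        private final VersionedSerializer<Integer> current = (t, v) -> "current:" + t;
        protected VersionedSerializer<Integer> delegate(int version) { return version < 10 ? legacy : current; }
    }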

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
index 9a4d919..7d98570 100644
--- a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
+++ b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
@@ -71,7 +71,6 @@ public class CompressionMetadata
     private final long chunkOffsetsSize;
     public final String indexFilePath;
     public final CompressionParams parameters;
-    public final ChecksumType checksumType;
 
     /**
      * Create metadata about given compressed file including uncompressed data length, chunk size
@@ -87,14 +86,13 @@ public class CompressionMetadata
     public static CompressionMetadata create(String dataFilePath)
     {
         Descriptor desc = Descriptor.fromFilename(dataFilePath);
-        return new CompressionMetadata(desc.filenameFor(Component.COMPRESSION_INFO), new File(dataFilePath).length(), desc.version.compressedChecksumType());
+        return new CompressionMetadata(desc.filenameFor(Component.COMPRESSION_INFO), new File(dataFilePath).length());
     }
 
     @VisibleForTesting
-    public CompressionMetadata(String indexFilePath, long compressedLength, ChecksumType checksumType)
+    public CompressionMetadata(String indexFilePath, long compressedLength)
     {
         this.indexFilePath = indexFilePath;
-        this.checksumType = checksumType;
 
         try (DataInputStream stream = new DataInputStream(new FileInputStream(indexFilePath)))
         {
@@ -133,7 +131,7 @@ public class CompressionMetadata
         this.chunkOffsetsSize = chunkOffsets.size();
     }
 
-    private CompressionMetadata(String filePath, CompressionParams parameters, SafeMemory offsets, long offsetsSize, long dataLength, long compressedLength, ChecksumType checksumType)
+    private CompressionMetadata(String filePath, CompressionParams parameters, SafeMemory offsets, long offsetsSize, long dataLength, long compressedLength)
     {
         this.indexFilePath = filePath;
         this.parameters = parameters;
@@ -141,7 +139,6 @@ public class CompressionMetadata
         this.compressedFileLength = compressedLength;
         this.chunkOffsets = offsets;
         this.chunkOffsetsSize = offsetsSize;
-        this.checksumType = checksumType;
     }
 
     public ICompressor compressor()
@@ -417,7 +414,7 @@ public class CompressionMetadata
             if (count < this.count)
                 compressedLength = offsets.getLong(count * 8L);
 
-            return new CompressionMetadata(filePath, parameters, offsets, count * 8L, dataLength, compressedLength, ChecksumType.CRC32);
+            return new CompressionMetadata(filePath, parameters, offsets, count * 8L, dataLength, compressedLength);
         }
 
         /**
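
With the checksumType field gone, CRC32 is the only checksum used for compressed chunks. A minimal sketch of producing such a checksum with the JDK's CRC32 (illustrative only, not the actual ChecksumType plumbing):

    import java.util.zip.CRC32;

    final class ChunkChecksum
    {
        // The CRC32 value that would be stored alongside a compressed chunk.
        static int crc32(byte[] chunk, int off, int len)
        {
            CRC32 crc = new CRC32();
            crc.update(chunk, off, len);
            return (int) crc.getValue();
        }
    }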

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java b/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java
index 9a8f968..7efca63 100644
--- a/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/AbstractSSTableSimpleWriter.java
@@ -18,7 +18,7 @@
 package org.apache.cassandra.io.sstable;
 
 import java.io.File;
-import java.io.FilenameFilter;
+import java.io.FileFilter;
 import java.io.IOException;
 import java.io.Closeable;
 import java.nio.ByteBuffer;
@@ -90,12 +90,11 @@ abstract class AbstractSSTableSimpleWriter implements Closeable
     private static int getNextGeneration(File directory, final String columnFamily)
     {
         final Set<Descriptor> existing = new HashSet<>();
-        directory.list(new FilenameFilter()
+        directory.listFiles(new FileFilter()
         {
-            public boolean accept(File dir, String name)
+            public boolean accept(File file)
             {
-                Pair<Descriptor, Component> p = SSTable.tryComponentFromFilename(dir, name);
-                Descriptor desc = p == null ? null : p.left;
+                Descriptor desc = SSTable.tryDescriptorFromFilename(file);
                 if (desc == null)
                     return false;
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/Component.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/Component.java b/src/java/org/apache/cassandra/io/sstable/Component.java
index 38152af..469a25c 100644
--- a/src/java/org/apache/cassandra/io/sstable/Component.java
+++ b/src/java/org/apache/cassandra/io/sstable/Component.java
@@ -50,8 +50,8 @@ public class Component
         COMPRESSION_INFO("CompressionInfo.db"),
         // statistical metadata about the content of the sstable
         STATS("Statistics.db"),
-        // holds adler32 checksum of the data file
-        DIGEST("Digest.crc32", "Digest.adler32", "Digest.sha1"),
+        // holds CRC32 checksum of the data file
+        DIGEST("Digest.crc32"),
         // holds the CRC32 for chunks in an uncompressed file.
         CRC("CRC.db"),
         // holds SSTable Index Summary (sampling of Index component)
@@ -61,16 +61,11 @@ public class Component
         // built-in secondary index (may be multiple per sstable)
         SECONDARY_INDEX("SI_.*.db"),
         // custom component, used by e.g. custom compaction strategy
-        CUSTOM(new String[] { null });
+        CUSTOM(null);
         
-        final String[] repr;
+        final String repr;
         Type(String repr)
         {
-            this(new String[] { repr });
-        }
-
-        Type(String... repr)
-        {
             this.repr = repr;
         }
 
@@ -78,9 +73,7 @@ public class Component
         {
             for (Type type : TYPES)
             {
-                if (type.repr == null || type.repr.length == 0 || type.repr[0] == null)
-                    continue;
-                if (Pattern.matches(type.repr[0], repr))
+                if (type.repr != null && Pattern.matches(type.repr, repr))
                     return type;
             }
             return CUSTOM;
@@ -93,36 +86,18 @@ public class Component
     public final static Component FILTER = new Component(Type.FILTER);
     public final static Component COMPRESSION_INFO = new Component(Type.COMPRESSION_INFO);
     public final static Component STATS = new Component(Type.STATS);
-    private static final String digestCrc32 = "Digest.crc32";
-    private static final String digestAdler32 = "Digest.adler32";
-    private static final String digestSha1 = "Digest.sha1";
-    public final static Component DIGEST_CRC32 = new Component(Type.DIGEST, digestCrc32);
-    public final static Component DIGEST_ADLER32 = new Component(Type.DIGEST, digestAdler32);
-    public final static Component DIGEST_SHA1 = new Component(Type.DIGEST, digestSha1);
+    public final static Component DIGEST = new Component(Type.DIGEST);
     public final static Component CRC = new Component(Type.CRC);
     public final static Component SUMMARY = new Component(Type.SUMMARY);
     public final static Component TOC = new Component(Type.TOC);
 
-    public static Component digestFor(ChecksumType checksumType)
-    {
-        switch (checksumType)
-        {
-            case Adler32:
-                return DIGEST_ADLER32;
-            case CRC32:
-                return DIGEST_CRC32;
-        }
-        throw new AssertionError();
-    }
-
     public final Type type;
     public final String name;
     public final int hashCode;
 
     public Component(Type type)
     {
-        this(type, type.repr[0]);
-        assert type.repr.length == 1;
+        this(type, type.repr);
         assert type != Type.CUSTOM;
     }
 
@@ -143,45 +118,32 @@ public class Component
     }
 
     /**
-     * {@code
-     * Filename of the form "<ksname>/<cfname>-[tmp-][<version>-]<gen>-<component>",
-     * }
-     * @return A Descriptor for the SSTable, and a Component for this particular file.
-     * TODO move descriptor into Component field
+     * Parse the component part of a sstable filename into a {@code Component} object.
+     *
+     * @param name a string representing a sstable component.
+     * @return the component corresponding to {@code name}. Note that this always returns a component, since an
+     * unrecognized name is parsed into a CUSTOM component.
      */
-    public static Pair<Descriptor,Component> fromFilename(File directory, String name)
+    static Component parse(String name)
     {
-        Pair<Descriptor,String> path = Descriptor.fromFilename(directory, name);
+        Type type = Type.fromRepresentation(name);
 
-        // parse the component suffix
-        Type type = Type.fromRepresentation(path.right);
-        // build (or retrieve singleton for) the component object
-        Component component;
-        switch(type)
+        // Build (or retrieve singleton for) the component object
+        switch (type)
         {
-            case DATA:              component = Component.DATA;                         break;
-            case PRIMARY_INDEX:     component = Component.PRIMARY_INDEX;                break;
-            case FILTER:            component = Component.FILTER;                       break;
-            case COMPRESSION_INFO:  component = Component.COMPRESSION_INFO;             break;
-            case STATS:             component = Component.STATS;                        break;
-            case DIGEST:            switch (path.right)
-                                    {
-                                        case digestCrc32:   component = Component.DIGEST_CRC32;     break;
-                                        case digestAdler32: component = Component.DIGEST_ADLER32;   break;
-                                        case digestSha1:    component = Component.DIGEST_SHA1;      break;
-                                        default:            throw new IllegalArgumentException("Invalid digest component " + path.right);
-                                    }
-                                    break;
-            case CRC:               component = Component.CRC;                          break;
-            case SUMMARY:           component = Component.SUMMARY;                      break;
-            case TOC:               component = Component.TOC;                          break;
-            case SECONDARY_INDEX:   component = new Component(Type.SECONDARY_INDEX, path.right); break;
-            case CUSTOM:            component = new Component(Type.CUSTOM, path.right); break;
-            default:
-                 throw new IllegalStateException();
+            case DATA:             return Component.DATA;
+            case PRIMARY_INDEX:    return Component.PRIMARY_INDEX;
+            case FILTER:           return Component.FILTER;
+            case COMPRESSION_INFO: return Component.COMPRESSION_INFO;
+            case STATS:            return Component.STATS;
+            case DIGEST:           return Component.DIGEST;
+            case CRC:              return Component.CRC;
+            case SUMMARY:          return Component.SUMMARY;
+            case TOC:              return Component.TOC;
+            case SECONDARY_INDEX:  return new Component(Type.SECONDARY_INDEX, name);
+            case CUSTOM:           return new Component(Type.CUSTOM, name);
+            default:               throw new AssertionError();
         }
-
-        return Pair.create(path.left, component);
     }
 
     @Override
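
A short usage sketch of the simplified parse method above (parse is package-private in the diff, so this assumes same-package access):

    Component data = Component.parse("Data.db");       // returns the Component.DATA singleton
    Component custom = Component.parse("Whatever.db"); // unrecognized names become CUSTOM components
    assert custom.type == Component.Type.CUSTOM;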

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/Descriptor.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/Descriptor.java b/src/java/org/apache/cassandra/io/sstable/Descriptor.java
index 1f7e67f..3804fd8 100644
--- a/src/java/org/apache/cassandra/io/sstable/Descriptor.java
+++ b/src/java/org/apache/cassandra/io/sstable/Descriptor.java
@@ -26,12 +26,12 @@ import java.util.regex.Pattern;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.CharMatcher;
 import com.google.common.base.Objects;
+import com.google.common.base.Splitter;
 
 import org.apache.cassandra.db.Directories;
 import org.apache.cassandra.io.sstable.format.SSTableFormat;
 import org.apache.cassandra.io.sstable.format.Version;
 import org.apache.cassandra.io.sstable.metadata.IMetadataSerializer;
-import org.apache.cassandra.io.sstable.metadata.LegacyMetadataSerializer;
 import org.apache.cassandra.io.sstable.metadata.MetadataSerializer;
 import org.apache.cassandra.utils.Pair;
 
@@ -46,8 +46,13 @@ import static org.apache.cassandra.io.sstable.Component.separator;
  */
 public class Descriptor
 {
+    private final static String LEGACY_TMP_REGEX_STR = "^((.*)\\-(.*)\\-)?tmp(link)?\\-((?:l|k).)\\-(\\d)*\\-(.*)$";
+    private final static Pattern LEGACY_TMP_REGEX = Pattern.compile(LEGACY_TMP_REGEX_STR);
+
     public static String TMP_EXT = ".tmp";
 
+    private static final Splitter filenameSplitter = Splitter.on('-');
+
     /** canonicalized path to the directory where SSTable resides */
     public final File directory;
     /** version has the following format: <code>[a-z]+</code> */
@@ -56,8 +61,6 @@ public class Descriptor
     public final String cfname;
     public final int generation;
     public final SSTableFormat.Type formatType;
-    /** digest component - might be {@code null} for old, legacy sstables */
-    public final Component digestComponent;
     private final int hashCode;
 
     /**
@@ -66,7 +69,7 @@ public class Descriptor
     @VisibleForTesting
     public Descriptor(File directory, String ksname, String cfname, int generation)
     {
-        this(SSTableFormat.Type.current().info.getLatestVersion(), directory, ksname, cfname, generation, SSTableFormat.Type.current(), null);
+        this(SSTableFormat.Type.current().info.getLatestVersion(), directory, ksname, cfname, generation, SSTableFormat.Type.current());
     }
 
     /**
@@ -74,16 +77,10 @@ public class Descriptor
      */
     public Descriptor(File directory, String ksname, String cfname, int generation, SSTableFormat.Type formatType)
     {
-        this(formatType.info.getLatestVersion(), directory, ksname, cfname, generation, formatType, Component.digestFor(formatType.info.getLatestVersion().uncompressedChecksumType()));
-    }
-
-    @VisibleForTesting
-    public Descriptor(String version, File directory, String ksname, String cfname, int generation, SSTableFormat.Type formatType)
-    {
-        this(formatType.info.getVersion(version), directory, ksname, cfname, generation, formatType, Component.digestFor(formatType.info.getLatestVersion().uncompressedChecksumType()));
+        this(formatType.info.getLatestVersion(), directory, ksname, cfname, generation, formatType);
     }
 
-    public Descriptor(Version version, File directory, String ksname, String cfname, int generation, SSTableFormat.Type formatType, Component digestComponent)
+    public Descriptor(Version version, File directory, String ksname, String cfname, int generation, SSTableFormat.Type formatType)
     {
         assert version != null && directory != null && ksname != null && cfname != null && formatType.info.getLatestVersion().getClass().equals(version.getClass());
         this.version = version;
@@ -99,24 +96,18 @@ public class Descriptor
         this.cfname = cfname;
         this.generation = generation;
         this.formatType = formatType;
-        this.digestComponent = digestComponent;
 
         hashCode = Objects.hashCode(version, this.directory, generation, ksname, cfname, formatType);
     }
 
     public Descriptor withGeneration(int newGeneration)
     {
-        return new Descriptor(version, directory, ksname, cfname, newGeneration, formatType, digestComponent);
+        return new Descriptor(version, directory, ksname, cfname, newGeneration, formatType);
     }
 
     public Descriptor withFormatType(SSTableFormat.Type newType)
     {
-        return new Descriptor(newType.info.getLatestVersion(), directory, ksname, cfname, generation, newType, digestComponent);
-    }
-
-    public Descriptor withDigestComponent(Component newDigestComponent)
-    {
-        return new Descriptor(version, directory, ksname, cfname, generation, formatType, newDigestComponent);
+        return new Descriptor(newType.info.getLatestVersion(), directory, ksname, cfname, generation, newType);
     }
 
     public String tmpFilenameFor(Component component)
@@ -139,15 +130,9 @@ public class Descriptor
 
     private void appendFileName(StringBuilder buff)
     {
-        if (!version.hasNewFileName())
-        {
-            buff.append(ksname).append(separator);
-            buff.append(cfname).append(separator);
-        }
         buff.append(version).append(separator);
         buff.append(generation);
-        if (formatType != SSTableFormat.Type.LEGACY)
-            buff.append(separator).append(formatType.name);
+        buff.append(separator).append(formatType.name);
     }
 
     public String relativeFilenameFor(Component component)
@@ -176,155 +161,156 @@ public class Descriptor
         return ret;
     }
 
-    /**
-     *  Files obsoleted by CASSANDRA-7066 : temporary files and compactions_in_progress. We support
-     *  versions 2.1 (ka) and 2.2 (la).
-     *  Temporary files have tmp- or tmplink- at the beginning for 2.2 sstables or after ks-cf- for 2.1 sstables
-     */
-
-    private final static String LEGACY_COMP_IN_PROG_REGEX_STR = "^compactions_in_progress(\\-[\\d,a-f]{32})?$";
-    private final static Pattern LEGACY_COMP_IN_PROG_REGEX = Pattern.compile(LEGACY_COMP_IN_PROG_REGEX_STR);
-    private final static String LEGACY_TMP_REGEX_STR = "^((.*)\\-(.*)\\-)?tmp(link)?\\-((?:l|k).)\\-(\\d)*\\-(.*)$";
-    private final static Pattern LEGACY_TMP_REGEX = Pattern.compile(LEGACY_TMP_REGEX_STR);
-
-    public static boolean isLegacyFile(File file)
+    public static boolean isValidFile(File file)
     {
-        if (file.isDirectory())
-            return file.getParentFile() != null &&
-                   file.getParentFile().getName().equalsIgnoreCase("system") &&
-                   LEGACY_COMP_IN_PROG_REGEX.matcher(file.getName()).matches();
-        else
-            return LEGACY_TMP_REGEX.matcher(file.getName()).matches();
-    }
-
-    public static boolean isValidFile(String fileName)
-    {
-        return fileName.endsWith(".db") && !LEGACY_TMP_REGEX.matcher(fileName).matches();
+        String filename = file.getName();
+        return filename.endsWith(".db") && !LEGACY_TMP_REGEX.matcher(filename).matches();
     }
 
     /**
-     * @see #fromFilename(File directory, String name)
-     * @param filename The SSTable filename
-     * @return Descriptor of the SSTable initialized from filename
+     * Parse a sstable filename into a Descriptor.
+     * <p>
+     * This is a shortcut for {@code fromFilename(new File(filename))}.
+     *
+     * @param filename the filename of a sstable component.
+     * @return the descriptor for the parsed file.
+     *
+     * @throws IllegalArgumentException if the provided {@code filename} does not point to a valid sstable filename. This
+     * could mean either that the filename doesn't look like a sstable file, or that it is for an old and unsupported
+     * version.
      */
     public static Descriptor fromFilename(String filename)
     {
-        return fromFilename(filename, false);
-    }
-
-    public static Descriptor fromFilename(String filename, SSTableFormat.Type formatType)
-    {
-        return fromFilename(filename).withFormatType(formatType);
+        return fromFilename(new File(filename));
     }
 
-    public static Descriptor fromFilename(String filename, boolean skipComponent)
-    {
-        File file = new File(filename).getAbsoluteFile();
-        return fromFilename(file.getParentFile(), file.getName(), skipComponent).left;
-    }
-
-    public static Pair<Descriptor, String> fromFilename(File directory, String name)
+    /**
+     * Parse a sstable filename into a Descriptor.
+     * <p>
+     * SSTable files are all located within subdirectories of the form {@code <keyspace>/<table>/}. Normal sstables are
+     * directly within that subdirectory structure, while secondary index, backup and snapshot sstables are each inside an
+     * additional subdirectory. The files themselves have the form:
+     *   {@code <version>-<gen>-<format>-<component>}.
+     * <p>
+     * Note that this method will only successfully parse sstable files of supported versions.
+     *
+     * @param file the {@code File} object for the filename to parse.
+     * @return the descriptor for the parsed file.
+     *
+     * @throws IllegalArgumentException if the provided {@code file} does not point to a valid sstable filename. This
+     * could mean either that the filename doesn't look like a sstable file, or that it is for an old and unsupported
+     * version.
+     */
+    public static Descriptor fromFilename(File file)
     {
-        return fromFilename(directory, name, false);
+        return fromFilenameWithComponent(file).left;
     }
 
     /**
-     * Filename of the form is vary by version:
+     * Parse a sstable filename, extracting both the {@code Descriptor} and {@code Component} part.
      *
-     * <ul>
-     *     <li>&lt;ksname&gt;-&lt;cfname&gt;-(tmp-)?&lt;version&gt;-&lt;gen&gt;-&lt;component&gt; for cassandra 2.0 and before</li>
-     *     <li>(&lt;tmp marker&gt;-)?&lt;version&gt;-&lt;gen&gt;-&lt;component&gt; for cassandra 3.0 and later</li>
-     * </ul>
+     * @param file the {@code File} object for the filename to parse.
+     * @return a pair of the descriptor and component corresponding to the provided {@code file}.
      *
-     * If this is for SSTable of secondary index, directory should ends with index name for 2.1+.
-     *
-     * @param directory The directory of the SSTable files
-     * @param name The name of the SSTable file
-     * @param skipComponent true if the name param should not be parsed for a component tag
-     *
-     * @return A Descriptor for the SSTable, and the Component remainder.
+     * @throws IllegalArgumentException if the provided {@code file} does not point to a valid sstable filename. This
+     * could mean either that the filename doesn't look like a sstable file, or that it is for an old and unsupported
+     * version.
      */
-    public static Pair<Descriptor, String> fromFilename(File directory, String name, boolean skipComponent)
+    public static Pair<Descriptor, Component> fromFilenameWithComponent(File file)
     {
-        File parentDirectory = directory != null ? directory : new File(".");
+        // We need to extract the keyspace and table names from the parent directories, so make sure we deal with the
+        // absolute path.
+        if (!file.isAbsolute())
+            file = file.getAbsoluteFile();
 
-        // tokenize the filename
-        StringTokenizer st = new StringTokenizer(name, String.valueOf(separator));
-        String nexttok;
+        String name = file.getName();
+        List<String> tokens = filenameSplitter.splitToList(name);
+        int size = tokens.size();
 
-        // read tokens backwards to determine version
-        Deque<String> tokenStack = new ArrayDeque<>();
-        while (st.hasMoreTokens())
+        if (size != 4)
         {
-            tokenStack.push(st.nextToken());
+            // This is an invalid sstable file for this version. But to provide a more helpful error message, we detect
+            // old-format sstables, which had the format:
+            //   <keyspace>-<table>-(tmp-)?<version>-<gen>-<component>
+            // Note that we assume it's an old-format sstable if it has the right number of tokens: this is not foolproof,
+            // but we're just trying to be helpful, not perfect.
+            if (size == 5 || size == 6)
+                throw new IllegalArgumentException(String.format("%s is of version %s which is now unsupported and cannot be read.",
+                                                                 name,
+                                                                 tokens.get(size - 3)));
+            throw new IllegalArgumentException(String.format("Invalid sstable file %s: the name doesn't look like a supported sstable file name", name));
         }
 
-        // component suffix
-        String component = skipComponent ? null : tokenStack.pop();
+        String versionString = tokens.get(0);
+        if (!Version.validate(versionString))
+            throw invalidSSTable(name, "invalid version %s", versionString);
 
-        nexttok = tokenStack.pop();
-        // generation OR format type
-        SSTableFormat.Type fmt = SSTableFormat.Type.LEGACY;
-        if (!CharMatcher.DIGIT.matchesAllOf(nexttok))
+        int generation;
+        try
         {
-            fmt = SSTableFormat.Type.validate(nexttok);
-            nexttok = tokenStack.pop();
+            generation = Integer.parseInt(tokens.get(1));
+        }
+        catch (NumberFormatException e)
+        {
+            throw invalidSSTable(name, "the 'generation' part of the name doesn't parse as a number");
         }
 
-        // generation
-        int generation = Integer.parseInt(nexttok);
+        String formatString = tokens.get(2);
+        SSTableFormat.Type format;
+        try
+        {
+            format = SSTableFormat.Type.validate(formatString);
+        }
+        catch (IllegalArgumentException e)
+        {
+            throw invalidSSTable(name, "unknown 'format' part (%s)", formatString);
+        }
 
-        // version
-        nexttok = tokenStack.pop();
+        Component component = Component.parse(tokens.get(3));
 
-        if (!Version.validate(nexttok))
-            throw new UnsupportedOperationException("SSTable " + name + " is too old to open.  Upgrade to 2.0 first, and run upgradesstables");
+        Version version = format.info.getVersion(versionString);
+        if (!version.isCompatible())
+            throw invalidSSTable(name, "incompatible sstable version (%s); you should have run upgradesstables before upgrading", versionString);
 
-        Version version = fmt.info.getVersion(nexttok);
+        File directory = parentOf(name, file);
+        File tableDir = directory;
 
-        // ks/cf names
-        String ksname, cfname;
-        if (version.hasNewFileName())
+        // Check if it's a secondary index directory (note that this doesn't exclude it from also being a backup or snapshot)
+        String indexName = "";
+        if (tableDir.getName().startsWith(Directories.SECONDARY_INDEX_NAME_SEPARATOR))
         {
-            // for 2.1+ read ks and cf names from directory
-            File cfDirectory = parentDirectory;
-            // check if this is secondary index
-            String indexName = "";
-            if (cfDirectory.getName().startsWith(Directories.SECONDARY_INDEX_NAME_SEPARATOR))
-            {
-                indexName = cfDirectory.getName();
-                cfDirectory = cfDirectory.getParentFile();
-            }
-            if (cfDirectory.getName().equals(Directories.BACKUPS_SUBDIR))
-            {
-                cfDirectory = cfDirectory.getParentFile();
-            }
-            else if (cfDirectory.getParentFile().getName().equals(Directories.SNAPSHOT_SUBDIR))
-            {
-                cfDirectory = cfDirectory.getParentFile().getParentFile();
-            }
-            cfname = cfDirectory.getName().split("-")[0] + indexName;
-            ksname = cfDirectory.getParentFile().getName();
+            indexName = tableDir.getName();
+            tableDir = parentOf(name, tableDir);
         }
-        else
-        {
-            cfname = tokenStack.pop();
-            ksname = tokenStack.pop();
-        }
-        assert tokenStack.isEmpty() : "Invalid file name " + name + " in " + directory;
 
-        return Pair.create(new Descriptor(version, parentDirectory, ksname, cfname, generation, fmt,
-                                          // _assume_ version from version
-                                          Component.digestFor(version.uncompressedChecksumType())),
-                           component);
+        // Then it can be a backup or a snapshot
+        if (tableDir.getName().equals(Directories.BACKUPS_SUBDIR))
+            tableDir = tableDir.getParentFile();
+        else if (parentOf(name, tableDir).getName().equals(Directories.SNAPSHOT_SUBDIR))
+            tableDir = parentOf(name, parentOf(name, tableDir));
+
+        String table = tableDir.getName().split("-")[0] + indexName;
+        String keyspace = parentOf(name, tableDir).getName();
+
+        return Pair.create(new Descriptor(version, directory, keyspace, table, generation, format), component);
+    }
+
+    private static File parentOf(String name, File file)
+    {
+        File parent = file.getParentFile();
+        if (parent == null)
+            throw invalidSSTable(name, "cannot extract keyspace and table name; make sure the sstable is in the proper sub-directories");
+        return parent;
+    }
+
+    private static IllegalArgumentException invalidSSTable(String name, String msgFormat, Object... parameters)
+    {
+        throw new IllegalArgumentException(String.format("Invalid sstable file " + name + ": " + msgFormat, parameters));
     }
 
     public IMetadataSerializer getMetadataSerializer()
     {
-        if (version.hasNewStatsFile())
-            return new MetadataSerializer();
-        else
-            return new LegacyMetadataSerializer();
+        return new MetadataSerializer();
     }
 
     /**
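
A usage sketch for the rewritten parser; the directory layout follows the javadoc above, while the path, table-id suffix and version string "mc" are illustrative assumptions:

    File f = new File("/var/lib/cassandra/data/ks1/tbl1-1c2dfab0/mc-5-big-Data.db");
    Pair<Descriptor, Component> p = Descriptor.fromFilenameWithComponent(f);
    // p.left.ksname == "ks1", p.left.cfname == "tbl1", p.left.generation == 5, p.right == Component.DATA
    // An old-style name such as "ks1-tbl1-ka-5-Data.db" (5 tokens) now throws IllegalArgumentException.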

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/IndexInfo.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/IndexInfo.java b/src/java/org/apache/cassandra/io/sstable/IndexInfo.java
index 9ee1996..03246c5 100644
--- a/src/java/org/apache/cassandra/io/sstable/IndexInfo.java
+++ b/src/java/org/apache/cassandra/io/sstable/IndexInfo.java
@@ -19,11 +19,14 @@
 package org.apache.cassandra.io.sstable;
 
 import java.io.IOException;
+import java.util.List;
 
 import org.apache.cassandra.db.ClusteringPrefix;
 import org.apache.cassandra.db.DeletionTime;
 import org.apache.cassandra.db.RowIndexEntry;
+import org.apache.cassandra.db.SerializationHeader;
 import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.io.ISerializer;
 import org.apache.cassandra.io.sstable.format.Version;
 import org.apache.cassandra.io.util.DataInputPlus;
@@ -79,6 +82,11 @@ public class IndexInfo
         this.endOpenMarker = endOpenMarker;
     }
 
+    public static IndexInfo.Serializer serializer(Version version, SerializationHeader header)
+    {
+        return new IndexInfo.Serializer(version, header.clusteringTypes());
+    }
+
     public static class Serializer implements ISerializer<IndexInfo>
     {
         // This is the default index size that we use to delta-encode width when serializing so we get better vint-encoding.
@@ -87,21 +95,19 @@ public class IndexInfo
         // size so using the default is almost surely better than using no base at all.
         public static final long WIDTH_BASE = 64 * 1024;
 
-        private final ISerializer<ClusteringPrefix> clusteringSerializer;
-        private final Version version;
+        private final int version;
+        private final List<AbstractType<?>> clusteringTypes;
 
-        public Serializer(Version version, ISerializer<ClusteringPrefix> clusteringSerializer)
+        public Serializer(Version version, List<AbstractType<?>> clusteringTypes)
         {
-            this.clusteringSerializer = clusteringSerializer;
-            this.version = version;
+            this.version = version.correspondingMessagingVersion();
+            this.clusteringTypes = clusteringTypes;
         }
 
         public void serialize(IndexInfo info, DataOutputPlus out) throws IOException
         {
-            assert version.storeRows() : "We read old index files but we should never write them";
-
-            clusteringSerializer.serialize(info.firstName, out);
-            clusteringSerializer.serialize(info.lastName, out);
+            ClusteringPrefix.serializer.serialize(info.firstName, out, version, clusteringTypes);
+            ClusteringPrefix.serializer.serialize(info.lastName, out, version, clusteringTypes);
             out.writeUnsignedVInt(info.offset);
             out.writeVInt(info.width - WIDTH_BASE);
 
@@ -112,53 +118,33 @@ public class IndexInfo
 
         public void skip(DataInputPlus in) throws IOException
         {
-            clusteringSerializer.skip(in);
-            clusteringSerializer.skip(in);
-            if (version.storeRows())
-            {
-                in.readUnsignedVInt();
-                in.readVInt();
-                if (in.readBoolean())
-                    DeletionTime.serializer.skip(in);
-            }
-            else
-            {
-                in.skipBytes(TypeSizes.sizeof(0L));
-                in.skipBytes(TypeSizes.sizeof(0L));
-            }
+            ClusteringPrefix.serializer.skip(in, version, clusteringTypes);
+            ClusteringPrefix.serializer.skip(in, version, clusteringTypes);
+            in.readUnsignedVInt();
+            in.readVInt();
+            if (in.readBoolean())
+                DeletionTime.serializer.skip(in);
         }
 
         public IndexInfo deserialize(DataInputPlus in) throws IOException
         {
-            ClusteringPrefix firstName = clusteringSerializer.deserialize(in);
-            ClusteringPrefix lastName = clusteringSerializer.deserialize(in);
-            long offset;
-            long width;
+            ClusteringPrefix firstName = ClusteringPrefix.serializer.deserialize(in, version, clusteringTypes);
+            ClusteringPrefix lastName = ClusteringPrefix.serializer.deserialize(in, version, clusteringTypes);
+            long offset = in.readUnsignedVInt();
+            long width = in.readVInt() + WIDTH_BASE;
             DeletionTime endOpenMarker = null;
-            if (version.storeRows())
-            {
-                offset = in.readUnsignedVInt();
-                width = in.readVInt() + WIDTH_BASE;
-                if (in.readBoolean())
-                    endOpenMarker = DeletionTime.serializer.deserialize(in);
-            }
-            else
-            {
-                offset = in.readLong();
-                width = in.readLong();
-            }
+            if (in.readBoolean())
+                endOpenMarker = DeletionTime.serializer.deserialize(in);
             return new IndexInfo(firstName, lastName, offset, width, endOpenMarker);
         }
 
         public long serializedSize(IndexInfo info)
         {
-            assert version.storeRows() : "We read old index files but we should never write them";
-
-            long size = clusteringSerializer.serializedSize(info.firstName)
-                        + clusteringSerializer.serializedSize(info.lastName)
-                        + TypeSizes.sizeofUnsignedVInt(info.offset)
-                        + TypeSizes.sizeofVInt(info.width - WIDTH_BASE)
-                        + TypeSizes.sizeof(info.endOpenMarker != null);
+            long size = ClusteringPrefix.serializer.serializedSize(info.firstName, version, clusteringTypes)
+                      + ClusteringPrefix.serializer.serializedSize(info.lastName, version, clusteringTypes)
+                      + TypeSizes.sizeofUnsignedVInt(info.offset)
+                      + TypeSizes.sizeofVInt(info.width - WIDTH_BASE)
+                      + TypeSizes.sizeof(info.endOpenMarker != null);
 
             if (info.endOpenMarker != null)
                 size += DeletionTime.serializer.serializedSize(info.endOpenMarker);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/IndexSummary.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/IndexSummary.java b/src/java/org/apache/cassandra/io/sstable/IndexSummary.java
index 6de3478..303adfd 100644
--- a/src/java/org/apache/cassandra/io/sstable/IndexSummary.java
+++ b/src/java/org/apache/cassandra/io/sstable/IndexSummary.java
@@ -268,16 +268,13 @@ public class IndexSummary extends WrappedSharedCloseable
 
     public static class IndexSummarySerializer
     {
-        public void serialize(IndexSummary t, DataOutputPlus out, boolean withSamplingLevel) throws IOException
+        public void serialize(IndexSummary t, DataOutputPlus out) throws IOException
         {
             out.writeInt(t.minIndexInterval);
             out.writeInt(t.offsetCount);
             out.writeLong(t.getOffHeapSize());
-            if (withSamplingLevel)
-            {
-                out.writeInt(t.samplingLevel);
-                out.writeInt(t.sizeAtFullSampling);
-            }
+            out.writeInt(t.samplingLevel);
+            out.writeInt(t.sizeAtFullSampling);
             // our on-disk representation treats the offsets and the summary data as one contiguous structure,
             // in which the offsets are based from the start of the structure. i.e., if the offsets occupy
             // X bytes, the value of the first offset will be X. In memory we split the two regions up, so that
@@ -297,7 +294,7 @@ public class IndexSummary extends WrappedSharedCloseable
         }
 
         @SuppressWarnings("resource")
-        public IndexSummary deserialize(DataInputStream in, IPartitioner partitioner, boolean haveSamplingLevel, int expectedMinIndexInterval, int maxIndexInterval) throws IOException
+        public IndexSummary deserialize(DataInputStream in, IPartitioner partitioner, int expectedMinIndexInterval, int maxIndexInterval) throws IOException
         {
             int minIndexInterval = in.readInt();
             if (minIndexInterval != expectedMinIndexInterval)
@@ -308,17 +305,8 @@ public class IndexSummary extends WrappedSharedCloseable
 
             int offsetCount = in.readInt();
             long offheapSize = in.readLong();
-            int samplingLevel, fullSamplingSummarySize;
-            if (haveSamplingLevel)
-            {
-                samplingLevel = in.readInt();
-                fullSamplingSummarySize = in.readInt();
-            }
-            else
-            {
-                samplingLevel = BASE_SAMPLING_LEVEL;
-                fullSamplingSummarySize = offsetCount;
-            }
+            int samplingLevel = in.readInt();
+            int fullSamplingSummarySize = in.readInt();
 
             int effectiveIndexInterval = (int) Math.ceil((BASE_SAMPLING_LEVEL / (double) samplingLevel) * minIndexInterval);
             if (effectiveIndexInterval > maxIndexInterval)
@@ -355,13 +343,12 @@ public class IndexSummary extends WrappedSharedCloseable
          *
          * Only for use by offline tools like SSTableMetadataViewer, otherwise SSTable.first/last should be used.
          */
-        public Pair<DecoratedKey, DecoratedKey> deserializeFirstLastKey(DataInputStream in, IPartitioner partitioner, boolean haveSamplingLevel) throws IOException
+        public Pair<DecoratedKey, DecoratedKey> deserializeFirstLastKey(DataInputStream in, IPartitioner partitioner) throws IOException
         {
             in.skipBytes(4); // minIndexInterval
             int offsetCount = in.readInt();
             long offheapSize = in.readLong();
-            if (haveSamplingLevel)
-                in.skipBytes(8); // samplingLevel, fullSamplingSummarySize
+            in.skipBytes(8); // samplingLevel, fullSamplingSummarySize
 
             in.skip(offsetCount * 4);
             in.skip(offheapSize - offsetCount * 4);
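
Since the sampling level is now always serialized, deserialize() can compute the effective interval unconditionally. A worked check of the formula visible in the context above (BASE_SAMPLING_LEVEL = 128 is assumed here):

    int BASE_SAMPLING_LEVEL = 128;  // assumed full-sampling level
    int samplingLevel = 64;         // summary downsampled to half the entries
    int minIndexInterval = 128;
    int effectiveIndexInterval = (int) Math.ceil((BASE_SAMPLING_LEVEL / (double) samplingLevel) * minIndexInterval);
    // effectiveIndexInterval == 256: a downsampled summary indexes the data half as densely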

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/IndexSummaryRedistribution.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/IndexSummaryRedistribution.java b/src/java/org/apache/cassandra/io/sstable/IndexSummaryRedistribution.java
index 8fb4835..fc326dc 100644
--- a/src/java/org/apache/cassandra/io/sstable/IndexSummaryRedistribution.java
+++ b/src/java/org/apache/cassandra/io/sstable/IndexSummaryRedistribution.java
@@ -73,21 +73,9 @@ public class IndexSummaryRedistribution extends CompactionInfo.Holder
     public List<SSTableReader> redistributeSummaries() throws IOException
     {
         logger.info("Redistributing index summaries");
-        List<SSTableReader> oldFormatSSTables = new ArrayList<>();
         List<SSTableReader> redistribute = new ArrayList<>();
         for (LifecycleTransaction txn : transactions.values())
         {
-            for (SSTableReader sstable : ImmutableList.copyOf(txn.originals()))
-            {
-                // We can't change the sampling level of sstables with the old format, because the serialization format
-                // doesn't include the sampling level.  Leave this one as it is.  (See CASSANDRA-8993 for details.)
-                logger.trace("SSTable {} cannot be re-sampled due to old sstable format", sstable);
-                if (!sstable.descriptor.version.hasSamplingLevel())
-                {
-                    oldFormatSSTables.add(sstable);
-                    txn.cancel(sstable);
-                }
-            }
             redistribute.addAll(txn.originals());
         }
 
@@ -119,7 +107,7 @@ public class IndexSummaryRedistribution extends CompactionInfo.Holder
         Collections.sort(sstablesByHotness, new ReadRateComparator(readRates));
 
         long remainingBytes = memoryPoolBytes;
-        for (SSTableReader sstable : Iterables.concat(compacting, oldFormatSSTables))
+        for (SSTableReader sstable : compacting)
             remainingBytes -= sstable.getIndexSummaryOffHeapSize();
 
         logger.trace("Index summaries for compacting SSTables are using {} MB of space",
@@ -130,7 +118,7 @@ public class IndexSummaryRedistribution extends CompactionInfo.Holder
             txn.finish();
 
         total = 0;
-        for (SSTableReader sstable : Iterables.concat(compacting, oldFormatSSTables, newSSTables))
+        for (SSTableReader sstable : Iterables.concat(compacting, newSSTables))
             total += sstable.getIndexSummaryOffHeapSize();
         logger.trace("Completed resizing of index summaries; current approximate memory used: {}",
                      FBUtilities.prettyPrintMemory(total));

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/SSTable.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTable.java b/src/java/org/apache/cassandra/io/sstable/SSTable.java
index 601f5a0..8556cfa 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTable.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTable.java
@@ -168,14 +168,40 @@ public abstract class SSTable
     }
 
     /**
-     * @return Descriptor and Component pair. null if given file is not acceptable as SSTable component.
-     *         If component is of unknown type, returns CUSTOM component.
+     * Parse a sstable filename into both a {@link Descriptor} and {@code Component} object.
+     *
+     * @param file the filename to parse.
+     * @return a pair of the {@code Descriptor} and {@code Component} for {@code file} if it corresponds to
+     * a valid and supported sstable filename, {@code null} otherwise. Note that components of an unknown type will be
+     * returned as CUSTOM ones.
+     */
+    public static Pair<Descriptor, Component> tryComponentFromFilename(File file)
+    {
+        try
+        {
+            return Descriptor.fromFilenameWithComponent(file);
+        }
+        catch (Throwable e)
+        {
+            return null;
+        }
+    }
+
+    /**
+     * Parse a sstable filename into a {@link Descriptor} object.
+     * <p>
+     * Note that this method ignores the component part of the filename; if this is not what you want, use
+     * {@link #tryComponentFromFilename} instead.
+     *
+     * @param file the filename to parse.
+     * @return the {@code Descriptor} corresponding to {@code file} if it corresponds to a valid and supported sstable
+     * filename, {@code null} otherwise.
      */
-    public static Pair<Descriptor, Component> tryComponentFromFilename(File dir, String name)
+    public static Descriptor tryDescriptorFromFilename(File file)
     {
         try
         {
-            return Component.fromFilename(dir, name);
+            return Descriptor.fromFilename(file);
         }
         catch (Throwable e)
         {
@@ -218,17 +244,9 @@ public abstract class SSTable
         Set<Component> components = Sets.newHashSetWithExpectedSize(knownTypes.size());
         for (Component.Type componentType : knownTypes)
         {
-            if (componentType == Component.Type.DIGEST)
-            {
-                if (desc.digestComponent != null && new File(desc.filenameFor(desc.digestComponent)).exists())
-                    components.add(desc.digestComponent);
-            }
-            else
-            {
-                Component component = new Component(componentType);
-                if (new File(desc.filenameFor(component)).exists())
-                    components.add(component);
-            }
+            Component component = new Component(componentType);
+            if (new File(desc.filenameFor(component)).exists())
+                components.add(component);
         }
         return components;
     }
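
A minimal usage sketch for the two new helpers above (illustrative only, not part of the patch: the FilenameChecks class and its isDataFile/belongsTo wrappers are assumptions, while tryComponentFromFilename, tryDescriptorFromFilename, Component.DATA, and the Descriptor fields are taken from the surrounding diff):

    import java.io.File;
    import org.apache.cassandra.io.sstable.Component;
    import org.apache.cassandra.io.sstable.Descriptor;
    import org.apache.cassandra.io.sstable.SSTable;
    import org.apache.cassandra.utils.Pair;

    public final class FilenameChecks
    {
        // Hypothetical helper: keep only files whose name parses as a valid,
        // supported sstable Data component (mirrors the SSTableLoader change below).
        static boolean isDataFile(File file)
        {
            Pair<Descriptor, Component> p = SSTable.tryComponentFromFilename(file);
            return p != null && p.right.equals(Component.DATA);
        }

        // Hypothetical helper: when only the descriptor matters, the
        // component-ignoring variant avoids the Pair; null still signals an
        // unparseable or unsupported filename (mirrors the Directories change below).
        static boolean belongsTo(File file, String ksName, String cfName)
        {
            Descriptor desc = SSTable.tryDescriptorFromFilename(file);
            return desc != null && desc.ksname.equals(ksName) && desc.cfname.equals(cfName);
        }
    }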

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/SSTableLoader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableLoader.java b/src/java/org/apache/cassandra/io/sstable/SSTableLoader.java
index 043f6fa..e00de4a 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableLoader.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableLoader.java
@@ -85,7 +85,7 @@ public class SSTableLoader implements StreamEventHandler
                                               return false;
                                           }
 
-                                          Pair<Descriptor, Component> p = SSTable.tryComponentFromFilename(dir, name);
+                                          Pair<Descriptor, Component> p = SSTable.tryComponentFromFilename(file);
                                           Descriptor desc = p == null ? null : p.left;
                                           if (p == null || !p.right.equals(Component.DATA))
                                               return false;


[11/11] cassandra git commit: Remove pre-3.0 compatibility code for 4.0

Posted by sl...@apache.org.
Remove pre-3.0 compatibility code for 4.0

patch by Sylvain Lebresne; reviewed by Aleksey Yeschenko for CASSANDRA-12716


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/4a246419
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/4a246419
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/4a246419

Branch: refs/heads/trunk
Commit: 4a2464192e9e69457f5a5ecf26c094f9298bf069
Parents: 3fabc33
Author: Sylvain Lebresne <sy...@datastax.com>
Authored: Tue Sep 27 15:26:15 2016 +0200
Committer: Sylvain Lebresne <sy...@datastax.com>
Committed: Wed Nov 30 10:23:18 2016 +0100

----------------------------------------------------------------------
 CHANGES.txt                                     |    1 +
 NEWS.txt                                        |    4 +
 .../cassandra/auth/CassandraRoleManager.java    |   10 -
 .../batchlog/LegacyBatchlogMigrator.java        |  199 ----
 .../org/apache/cassandra/config/CFMetaData.java |    8 -
 .../restrictions/StatementRestrictions.java     |    3 -
 .../apache/cassandra/db/ColumnFamilyStore.java  |   27 +-
 .../org/apache/cassandra/db/Directories.java    |   40 +-
 .../org/apache/cassandra/db/LegacyLayout.java   |  488 +-------
 src/java/org/apache/cassandra/db/Memtable.java  |   12 +-
 src/java/org/apache/cassandra/db/Mutation.java  |   51 +-
 .../cassandra/db/MutationVerbHandler.java       |   19 +-
 .../cassandra/db/PartitionRangeReadCommand.java |    6 +-
 .../cassandra/db/RangeSliceVerbHandler.java     |   29 -
 .../org/apache/cassandra/db/ReadCommand.java    | 1061 +----------------
 .../org/apache/cassandra/db/ReadResponse.java   |  264 +----
 .../org/apache/cassandra/db/RowIndexEntry.java  |  189 +--
 .../org/apache/cassandra/db/Serializers.java    |  183 ---
 .../db/SinglePartitionReadCommand.java          |    4 +-
 .../org/apache/cassandra/db/SystemKeyspace.java |  229 +---
 .../cassandra/db/UnfilteredDeserializer.java    |  658 ++---------
 .../columniterator/AbstractSSTableIterator.java |   44 +-
 .../db/columniterator/SSTableIterator.java      |    6 +-
 .../columniterator/SSTableReversedIterator.java |   18 +-
 .../db/commitlog/CommitLogArchiver.java         |    2 +-
 .../db/commitlog/CommitLogDescriptor.java       |   47 +-
 .../cassandra/db/commitlog/CommitLogReader.java |   44 +-
 .../db/compaction/CompactionManager.java        |    4 +-
 .../cassandra/db/compaction/Upgrader.java       |    2 +-
 .../cassandra/db/compaction/Verifier.java       |    3 +-
 .../writers/DefaultCompactionWriter.java        |    2 +-
 .../writers/MajorLeveledCompactionWriter.java   |    2 +-
 .../writers/MaxSSTableSizeWriter.java           |    2 +-
 .../SplittingSizeTieredCompactionWriter.java    |    2 +-
 .../apache/cassandra/db/filter/RowFilter.java   |  103 +-
 .../db/partitions/PartitionUpdate.java          |   60 +-
 .../UnfilteredPartitionIterators.java           |    9 +-
 .../UnfilteredRowIteratorWithLowerBound.java    |    5 +-
 .../db/rows/UnfilteredRowIterators.java         |   10 +-
 .../apache/cassandra/dht/AbstractBounds.java    |    5 +
 src/java/org/apache/cassandra/gms/Gossiper.java |    6 -
 .../cassandra/hints/LegacyHintsMigrator.java    |  244 ----
 .../io/ForwardingVersionedSerializer.java       |   57 -
 .../io/compress/CompressionMetadata.java        |   11 +-
 .../io/sstable/AbstractSSTableSimpleWriter.java |    9 +-
 .../apache/cassandra/io/sstable/Component.java  |   94 +-
 .../apache/cassandra/io/sstable/Descriptor.java |  264 ++---
 .../apache/cassandra/io/sstable/IndexInfo.java  |   78 +-
 .../cassandra/io/sstable/IndexSummary.java      |   29 +-
 .../io/sstable/IndexSummaryRedistribution.java  |   16 +-
 .../apache/cassandra/io/sstable/SSTable.java    |   48 +-
 .../cassandra/io/sstable/SSTableLoader.java     |    2 +-
 .../io/sstable/SSTableSimpleIterator.java       |  112 +-
 .../cassandra/io/sstable/SSTableTxnWriter.java  |   10 +-
 .../sstable/format/RangeAwareSSTableWriter.java |    4 +-
 .../io/sstable/format/SSTableFormat.java        |    8 -
 .../io/sstable/format/SSTableReader.java        |   94 +-
 .../io/sstable/format/SSTableWriter.java        |   16 +-
 .../cassandra/io/sstable/format/Version.java    |   22 -
 .../io/sstable/format/big/BigFormat.java        |  125 +-
 .../io/sstable/format/big/BigTableWriter.java   |    6 +-
 .../io/sstable/metadata/CompactionMetadata.java |   13 -
 .../metadata/LegacyMetadataSerializer.java      |  163 ---
 .../io/sstable/metadata/StatsMetadata.java      |   44 +-
 .../io/util/CompressedChunkReader.java          |    5 +-
 .../io/util/DataIntegrityMetadata.java          |    6 +-
 .../cassandra/net/IncomingTcpConnection.java    |   28 +-
 .../org/apache/cassandra/net/MessageOut.java    |    2 +-
 .../apache/cassandra/net/MessagingService.java  |   73 +-
 .../cassandra/net/OutboundTcpConnection.java    |   35 +-
 .../apache/cassandra/repair/RepairJobDesc.java  |   27 +-
 .../org/apache/cassandra/repair/Validator.java  |    2 +-
 .../cassandra/schema/LegacySchemaMigrator.java  | 1099 ------------------
 .../cassandra/service/AbstractReadExecutor.java |    5 +-
 .../apache/cassandra/service/CacheService.java  |    4 -
 .../cassandra/service/CassandraDaemon.java      |   28 -
 .../apache/cassandra/service/DataResolver.java  |    2 +-
 .../apache/cassandra/service/ReadCallback.java  |    5 +-
 .../apache/cassandra/service/StartupChecks.java |    7 +-
 .../apache/cassandra/service/StorageProxy.java  |   72 +-
 .../cassandra/service/StorageService.java       |   11 +-
 .../apache/cassandra/service/paxos/Commit.java  |   16 +-
 .../service/paxos/PrepareResponse.java          |   39 +-
 .../cassandra/streaming/StreamReader.java       |   11 +-
 .../compress/CompressedStreamReader.java        |    3 +-
 .../streaming/messages/FileMessageHeader.java   |   25 +-
 .../streaming/messages/StreamMessage.java       |    2 -
 .../apache/cassandra/tools/SSTableExport.java   |    9 +-
 .../cassandra/tools/SSTableMetadataViewer.java  |    2 +-
 .../tools/SSTableRepairedAtSetter.java          |   21 +-
 .../cassandra/tools/StandaloneSplitter.java     |    5 +-
 .../org/apache/cassandra/utils/BloomFilter.java |   18 +-
 .../cassandra/utils/BloomFilterSerializer.java  |    8 +-
 .../apache/cassandra/utils/FilterFactory.java   |   16 +-
 .../org/apache/cassandra/utils/MerkleTree.java  |   48 +-
 .../lb-1-big-CompressionInfo.db                 |  Bin 43 -> 0 bytes
 .../cf_with_duplicates_2_0/lb-1-big-Data.db     |  Bin 84 -> 0 bytes
 .../lb-1-big-Digest.adler32                     |    1 -
 .../cf_with_duplicates_2_0/lb-1-big-Filter.db   |  Bin 16 -> 0 bytes
 .../cf_with_duplicates_2_0/lb-1-big-Index.db    |  Bin 18 -> 0 bytes
 .../lb-1-big-Statistics.db                      |  Bin 4474 -> 0 bytes
 .../cf_with_duplicates_2_0/lb-1-big-Summary.db  |  Bin 84 -> 0 bytes
 .../cf_with_duplicates_2_0/lb-1-big-TOC.txt     |    8 -
 .../2.0/CommitLog-3-1431528750790.log           |  Bin 2097152 -> 0 bytes
 .../2.0/CommitLog-3-1431528750791.log           |  Bin 2097152 -> 0 bytes
 .../2.0/CommitLog-3-1431528750792.log           |  Bin 2097152 -> 0 bytes
 .../2.0/CommitLog-3-1431528750793.log           |  Bin 2097152 -> 0 bytes
 test/data/legacy-commitlog/2.0/hash.txt         |    3 -
 .../2.1/CommitLog-4-1431529069529.log           |  Bin 2097152 -> 0 bytes
 .../2.1/CommitLog-4-1431529069530.log           |  Bin 2097152 -> 0 bytes
 test/data/legacy-commitlog/2.1/hash.txt         |    3 -
 .../CommitLog-5-1438186885380.log               |  Bin 839051 -> 0 bytes
 .../legacy-commitlog/2.2-lz4-bitrot/hash.txt    |    6 -
 .../CommitLog-5-1438186885380.log               |  Bin 839051 -> 0 bytes
 .../legacy-commitlog/2.2-lz4-bitrot2/hash.txt   |    6 -
 .../CommitLog-5-1438186885380.log               |  Bin 839001 -> 0 bytes
 .../legacy-commitlog/2.2-lz4-truncated/hash.txt |    5 -
 .../2.2-lz4/CommitLog-5-1438186885380.log       |  Bin 839051 -> 0 bytes
 .../2.2-lz4/CommitLog-5-1438186885381.log       |  Bin 100 -> 0 bytes
 test/data/legacy-commitlog/2.2-lz4/hash.txt     |    5 -
 .../2.2-snappy/CommitLog-5-1438186915514.log    |  Bin 820332 -> 0 bytes
 .../2.2-snappy/CommitLog-5-1438186915515.log    |  Bin 99 -> 0 bytes
 test/data/legacy-commitlog/2.2-snappy/hash.txt  |    5 -
 .../2.2/CommitLog-5-1438186815314.log           |  Bin 2097152 -> 0 bytes
 .../2.2/CommitLog-5-1438186815315.log           |  Bin 2097152 -> 0 bytes
 test/data/legacy-commitlog/2.2/hash.txt         |    5 -
 .../Keyspace1/Keyspace1-Standard1-jb-0-CRC.db   |  Bin 8 -> 0 bytes
 .../Keyspace1/Keyspace1-Standard1-jb-0-Data.db  |  Bin 36000 -> 0 bytes
 .../Keyspace1-Standard1-jb-0-Digest.sha1        |    1 -
 .../Keyspace1-Standard1-jb-0-Filter.db          |  Bin 1136 -> 0 bytes
 .../Keyspace1/Keyspace1-Standard1-jb-0-Index.db |  Bin 15300 -> 0 bytes
 .../Keyspace1-Standard1-jb-0-Statistics.db      |  Bin 4395 -> 0 bytes
 .../Keyspace1-Standard1-jb-0-Summary.db         |  Bin 162 -> 0 bytes
 .../Keyspace1/Keyspace1-Standard1-jb-0-TOC.txt  |    8 -
 ...bles-legacy_jb_clust-jb-1-CompressionInfo.db |  Bin 115 -> 0 bytes
 .../legacy_tables-legacy_jb_clust-jb-1-Data.db  |  Bin 12006 -> 0 bytes
 ...legacy_tables-legacy_jb_clust-jb-1-Filter.db |  Bin 24 -> 0 bytes
 .../legacy_tables-legacy_jb_clust-jb-1-Index.db |  Bin 1219455 -> 0 bytes
 ...cy_tables-legacy_jb_clust-jb-1-Statistics.db |  Bin 6798 -> 0 bytes
 ...egacy_tables-legacy_jb_clust-jb-1-Summary.db |  Bin 71 -> 0 bytes
 .../legacy_tables-legacy_jb_clust-jb-1-TOC.txt  |    7 -
 ...acy_jb_clust_compact-jb-1-CompressionInfo.db |  Bin 83 -> 0 bytes
 ..._tables-legacy_jb_clust_compact-jb-1-Data.db |  Bin 5270 -> 0 bytes
 ...ables-legacy_jb_clust_compact-jb-1-Filter.db |  Bin 24 -> 0 bytes
 ...tables-legacy_jb_clust_compact-jb-1-Index.db |  Bin 157685 -> 0 bytes
 ...s-legacy_jb_clust_compact-jb-1-Statistics.db |  Bin 6791 -> 0 bytes
 ...bles-legacy_jb_clust_compact-jb-1-Summary.db |  Bin 71 -> 0 bytes
 ..._tables-legacy_jb_clust_compact-jb-1-TOC.txt |    7 -
 ...acy_jb_clust_counter-jb-1-CompressionInfo.db |  Bin 75 -> 0 bytes
 ..._tables-legacy_jb_clust_counter-jb-1-Data.db |  Bin 4276 -> 0 bytes
 ...ables-legacy_jb_clust_counter-jb-1-Filter.db |  Bin 24 -> 0 bytes
 ...tables-legacy_jb_clust_counter-jb-1-Index.db |  Bin 610555 -> 0 bytes
 ...s-legacy_jb_clust_counter-jb-1-Statistics.db |  Bin 6801 -> 0 bytes
 ...bles-legacy_jb_clust_counter-jb-1-Summary.db |  Bin 71 -> 0 bytes
 ..._tables-legacy_jb_clust_counter-jb-1-TOC.txt |    7 -
 ...lust_counter_compact-jb-1-CompressionInfo.db |  Bin 75 -> 0 bytes
 ...legacy_jb_clust_counter_compact-jb-1-Data.db |  Bin 4228 -> 0 bytes
 ...gacy_jb_clust_counter_compact-jb-1-Filter.db |  Bin 24 -> 0 bytes
 ...egacy_jb_clust_counter_compact-jb-1-Index.db |  Bin 157685 -> 0 bytes
 ..._jb_clust_counter_compact-jb-1-Statistics.db |  Bin 6791 -> 0 bytes
 ...acy_jb_clust_counter_compact-jb-1-Summary.db |  Bin 71 -> 0 bytes
 ...legacy_jb_clust_counter_compact-jb-1-TOC.txt |    7 -
 ...les-legacy_jb_simple-jb-1-CompressionInfo.db |  Bin 43 -> 0 bytes
 .../legacy_tables-legacy_jb_simple-jb-1-Data.db |  Bin 134 -> 0 bytes
 ...egacy_tables-legacy_jb_simple-jb-1-Filter.db |  Bin 24 -> 0 bytes
 ...legacy_tables-legacy_jb_simple-jb-1-Index.db |  Bin 75 -> 0 bytes
 ...y_tables-legacy_jb_simple-jb-1-Statistics.db |  Bin 4392 -> 0 bytes
 ...gacy_tables-legacy_jb_simple-jb-1-Summary.db |  Bin 71 -> 0 bytes
 .../legacy_tables-legacy_jb_simple-jb-1-TOC.txt |    7 -
 ...cy_jb_simple_compact-jb-1-CompressionInfo.db |  Bin 43 -> 0 bytes
 ...tables-legacy_jb_simple_compact-jb-1-Data.db |  Bin 108 -> 0 bytes
 ...bles-legacy_jb_simple_compact-jb-1-Filter.db |  Bin 24 -> 0 bytes
 ...ables-legacy_jb_simple_compact-jb-1-Index.db |  Bin 75 -> 0 bytes
 ...-legacy_jb_simple_compact-jb-1-Statistics.db |  Bin 4395 -> 0 bytes
 ...les-legacy_jb_simple_compact-jb-1-Summary.db |  Bin 71 -> 0 bytes
 ...tables-legacy_jb_simple_compact-jb-1-TOC.txt |    7 -
 ...cy_jb_simple_counter-jb-1-CompressionInfo.db |  Bin 43 -> 0 bytes
 ...tables-legacy_jb_simple_counter-jb-1-Data.db |  Bin 118 -> 0 bytes
 ...bles-legacy_jb_simple_counter-jb-1-Filter.db |  Bin 24 -> 0 bytes
 ...ables-legacy_jb_simple_counter-jb-1-Index.db |  Bin 75 -> 0 bytes
 ...-legacy_jb_simple_counter-jb-1-Statistics.db |  Bin 4395 -> 0 bytes
 ...les-legacy_jb_simple_counter-jb-1-Summary.db |  Bin 71 -> 0 bytes
 ...tables-legacy_jb_simple_counter-jb-1-TOC.txt |    7 -
 ...mple_counter_compact-jb-1-CompressionInfo.db |  Bin 43 -> 0 bytes
 ...egacy_jb_simple_counter_compact-jb-1-Data.db |  Bin 118 -> 0 bytes
 ...acy_jb_simple_counter_compact-jb-1-Filter.db |  Bin 24 -> 0 bytes
 ...gacy_jb_simple_counter_compact-jb-1-Index.db |  Bin 75 -> 0 bytes
 ...jb_simple_counter_compact-jb-1-Statistics.db |  Bin 4395 -> 0 bytes
 ...cy_jb_simple_counter_compact-jb-1-Summary.db |  Bin 71 -> 0 bytes
 ...egacy_jb_simple_counter_compact-jb-1-TOC.txt |    7 -
 ...bles-legacy_ka_clust-ka-1-CompressionInfo.db |  Bin 115 -> 0 bytes
 .../legacy_tables-legacy_ka_clust-ka-1-Data.db  |  Bin 12144 -> 0 bytes
 ...gacy_tables-legacy_ka_clust-ka-1-Digest.sha1 |    1 -
 ...legacy_tables-legacy_ka_clust-ka-1-Filter.db |  Bin 24 -> 0 bytes
 .../legacy_tables-legacy_ka_clust-ka-1-Index.db |  Bin 1219455 -> 0 bytes
 ...cy_tables-legacy_ka_clust-ka-1-Statistics.db |  Bin 6859 -> 0 bytes
 ...egacy_tables-legacy_ka_clust-ka-1-Summary.db |  Bin 71 -> 0 bytes
 .../legacy_tables-legacy_ka_clust-ka-1-TOC.txt  |    8 -
 ...acy_ka_clust_compact-ka-1-CompressionInfo.db |  Bin 83 -> 0 bytes
 ..._tables-legacy_ka_clust_compact-ka-1-Data.db |  Bin 5277 -> 0 bytes
 ...les-legacy_ka_clust_compact-ka-1-Digest.sha1 |    1 -
 ...ables-legacy_ka_clust_compact-ka-1-Filter.db |  Bin 24 -> 0 bytes
 ...tables-legacy_ka_clust_compact-ka-1-Index.db |  Bin 157685 -> 0 bytes
 ...s-legacy_ka_clust_compact-ka-1-Statistics.db |  Bin 6859 -> 0 bytes
 ...bles-legacy_ka_clust_compact-ka-1-Summary.db |  Bin 83 -> 0 bytes
 ..._tables-legacy_ka_clust_compact-ka-1-TOC.txt |    8 -
 ...acy_ka_clust_counter-ka-1-CompressionInfo.db |  Bin 75 -> 0 bytes
 ..._tables-legacy_ka_clust_counter-ka-1-Data.db |  Bin 4635 -> 0 bytes
 ...les-legacy_ka_clust_counter-ka-1-Digest.sha1 |    1 -
 ...ables-legacy_ka_clust_counter-ka-1-Filter.db |  Bin 24 -> 0 bytes
 ...tables-legacy_ka_clust_counter-ka-1-Index.db |  Bin 610555 -> 0 bytes
 ...s-legacy_ka_clust_counter-ka-1-Statistics.db |  Bin 6859 -> 0 bytes
 ...bles-legacy_ka_clust_counter-ka-1-Summary.db |  Bin 71 -> 0 bytes
 ..._tables-legacy_ka_clust_counter-ka-1-TOC.txt |    8 -
 ...lust_counter_compact-ka-1-CompressionInfo.db |  Bin 75 -> 0 bytes
 ...legacy_ka_clust_counter_compact-ka-1-Data.db |  Bin 4527 -> 0 bytes
 ...cy_ka_clust_counter_compact-ka-1-Digest.sha1 |    1 -
 ...gacy_ka_clust_counter_compact-ka-1-Filter.db |  Bin 24 -> 0 bytes
 ...egacy_ka_clust_counter_compact-ka-1-Index.db |  Bin 157685 -> 0 bytes
 ..._ka_clust_counter_compact-ka-1-Statistics.db |  Bin 6859 -> 0 bytes
 ...acy_ka_clust_counter_compact-ka-1-Summary.db |  Bin 83 -> 0 bytes
 ...legacy_ka_clust_counter_compact-ka-1-TOC.txt |    8 -
 ...les-legacy_ka_simple-ka-1-CompressionInfo.db |  Bin 43 -> 0 bytes
 .../legacy_tables-legacy_ka_simple-ka-1-Data.db |  Bin 134 -> 0 bytes
 ...acy_tables-legacy_ka_simple-ka-1-Digest.sha1 |    1 -
 ...egacy_tables-legacy_ka_simple-ka-1-Filter.db |  Bin 24 -> 0 bytes
 ...legacy_tables-legacy_ka_simple-ka-1-Index.db |  Bin 75 -> 0 bytes
 ...y_tables-legacy_ka_simple-ka-1-Statistics.db |  Bin 4453 -> 0 bytes
 ...gacy_tables-legacy_ka_simple-ka-1-Summary.db |  Bin 71 -> 0 bytes
 .../legacy_tables-legacy_ka_simple-ka-1-TOC.txt |    8 -
 ...cy_ka_simple_compact-ka-1-CompressionInfo.db |  Bin 43 -> 0 bytes
 ...tables-legacy_ka_simple_compact-ka-1-Data.db |  Bin 105 -> 0 bytes
 ...es-legacy_ka_simple_compact-ka-1-Digest.sha1 |    1 -
 ...bles-legacy_ka_simple_compact-ka-1-Filter.db |  Bin 24 -> 0 bytes
 ...ables-legacy_ka_simple_compact-ka-1-Index.db |  Bin 75 -> 0 bytes
 ...-legacy_ka_simple_compact-ka-1-Statistics.db |  Bin 4453 -> 0 bytes
 ...les-legacy_ka_simple_compact-ka-1-Summary.db |  Bin 83 -> 0 bytes
 ...tables-legacy_ka_simple_compact-ka-1-TOC.txt |    8 -
 ...cy_ka_simple_counter-ka-1-CompressionInfo.db |  Bin 43 -> 0 bytes
 ...tables-legacy_ka_simple_counter-ka-1-Data.db |  Bin 125 -> 0 bytes
 ...es-legacy_ka_simple_counter-ka-1-Digest.sha1 |    1 -
 ...bles-legacy_ka_simple_counter-ka-1-Filter.db |  Bin 24 -> 0 bytes
 ...ables-legacy_ka_simple_counter-ka-1-Index.db |  Bin 75 -> 0 bytes
 ...-legacy_ka_simple_counter-ka-1-Statistics.db |  Bin 4453 -> 0 bytes
 ...les-legacy_ka_simple_counter-ka-1-Summary.db |  Bin 71 -> 0 bytes
 ...tables-legacy_ka_simple_counter-ka-1-TOC.txt |    8 -
 ...mple_counter_compact-ka-1-CompressionInfo.db |  Bin 43 -> 0 bytes
 ...egacy_ka_simple_counter_compact-ka-1-Data.db |  Bin 124 -> 0 bytes
 ...y_ka_simple_counter_compact-ka-1-Digest.sha1 |    1 -
 ...acy_ka_simple_counter_compact-ka-1-Filter.db |  Bin 24 -> 0 bytes
 ...gacy_ka_simple_counter_compact-ka-1-Index.db |  Bin 75 -> 0 bytes
 ...ka_simple_counter_compact-ka-1-Statistics.db |  Bin 4453 -> 0 bytes
 ...cy_ka_simple_counter_compact-ka-1-Summary.db |  Bin 83 -> 0 bytes
 ...egacy_ka_simple_counter_compact-ka-1-TOC.txt |    8 -
 .../legacy_la_clust/la-1-big-CompressionInfo.db |  Bin 115 -> 0 bytes
 .../legacy_la_clust/la-1-big-Data.db            |  Bin 12082 -> 0 bytes
 .../legacy_la_clust/la-1-big-Digest.adler32     |    1 -
 .../legacy_la_clust/la-1-big-Filter.db          |  Bin 24 -> 0 bytes
 .../legacy_la_clust/la-1-big-Index.db           |  Bin 1219455 -> 0 bytes
 .../legacy_la_clust/la-1-big-Statistics.db      |  Bin 6859 -> 0 bytes
 .../legacy_la_clust/la-1-big-Summary.db         |  Bin 71 -> 0 bytes
 .../legacy_la_clust/la-1-big-TOC.txt            |    8 -
 .../la-1-big-CompressionInfo.db                 |  Bin 83 -> 0 bytes
 .../legacy_la_clust_compact/la-1-big-Data.db    |  Bin 5286 -> 0 bytes
 .../la-1-big-Digest.adler32                     |    1 -
 .../legacy_la_clust_compact/la-1-big-Filter.db  |  Bin 24 -> 0 bytes
 .../legacy_la_clust_compact/la-1-big-Index.db   |  Bin 157685 -> 0 bytes
 .../la-1-big-Statistics.db                      |  Bin 6859 -> 0 bytes
 .../legacy_la_clust_compact/la-1-big-Summary.db |  Bin 75 -> 0 bytes
 .../legacy_la_clust_compact/la-1-big-TOC.txt    |    8 -
 .../la-1-big-CompressionInfo.db                 |  Bin 75 -> 0 bytes
 .../legacy_la_clust_counter/la-1-big-Data.db    |  Bin 4623 -> 0 bytes
 .../la-1-big-Digest.adler32                     |    1 -
 .../legacy_la_clust_counter/la-1-big-Filter.db  |  Bin 24 -> 0 bytes
 .../legacy_la_clust_counter/la-1-big-Index.db   |  Bin 610555 -> 0 bytes
 .../la-1-big-Statistics.db                      |  Bin 6859 -> 0 bytes
 .../legacy_la_clust_counter/la-1-big-Summary.db |  Bin 71 -> 0 bytes
 .../legacy_la_clust_counter/la-1-big-TOC.txt    |    8 -
 .../la-1-big-CompressionInfo.db                 |  Bin 75 -> 0 bytes
 .../la-1-big-Data.db                            |  Bin 4527 -> 0 bytes
 .../la-1-big-Digest.adler32                     |    1 -
 .../la-1-big-Filter.db                          |  Bin 24 -> 0 bytes
 .../la-1-big-Index.db                           |  Bin 157685 -> 0 bytes
 .../la-1-big-Statistics.db                      |  Bin 6859 -> 0 bytes
 .../la-1-big-Summary.db                         |  Bin 75 -> 0 bytes
 .../la-1-big-TOC.txt                            |    8 -
 .../la-1-big-CompressionInfo.db                 |  Bin 43 -> 0 bytes
 .../legacy_la_simple/la-1-big-Data.db           |  Bin 139 -> 0 bytes
 .../legacy_la_simple/la-1-big-Digest.adler32    |    1 -
 .../legacy_la_simple/la-1-big-Filter.db         |  Bin 24 -> 0 bytes
 .../legacy_la_simple/la-1-big-Index.db          |  Bin 75 -> 0 bytes
 .../legacy_la_simple/la-1-big-Statistics.db     |  Bin 4453 -> 0 bytes
 .../legacy_la_simple/la-1-big-Summary.db        |  Bin 71 -> 0 bytes
 .../legacy_la_simple/la-1-big-TOC.txt           |    8 -
 .../la-1-big-CompressionInfo.db                 |  Bin 43 -> 0 bytes
 .../legacy_la_simple_compact/la-1-big-Data.db   |  Bin 106 -> 0 bytes
 .../la-1-big-Digest.adler32                     |    1 -
 .../legacy_la_simple_compact/la-1-big-Filter.db |  Bin 24 -> 0 bytes
 .../legacy_la_simple_compact/la-1-big-Index.db  |  Bin 75 -> 0 bytes
 .../la-1-big-Statistics.db                      |  Bin 4453 -> 0 bytes
 .../la-1-big-Summary.db                         |  Bin 75 -> 0 bytes
 .../legacy_la_simple_compact/la-1-big-TOC.txt   |    8 -
 .../la-1-big-CompressionInfo.db                 |  Bin 43 -> 0 bytes
 .../legacy_la_simple_counter/la-1-big-Data.db   |  Bin 123 -> 0 bytes
 .../la-1-big-Digest.adler32                     |    1 -
 .../legacy_la_simple_counter/la-1-big-Filter.db |  Bin 24 -> 0 bytes
 .../legacy_la_simple_counter/la-1-big-Index.db  |  Bin 75 -> 0 bytes
 .../la-1-big-Statistics.db                      |  Bin 4453 -> 0 bytes
 .../la-1-big-Summary.db                         |  Bin 71 -> 0 bytes
 .../legacy_la_simple_counter/la-1-big-TOC.txt   |    8 -
 .../la-1-big-CompressionInfo.db                 |  Bin 43 -> 0 bytes
 .../la-1-big-Data.db                            |  Bin 124 -> 0 bytes
 .../la-1-big-Digest.adler32                     |    1 -
 .../la-1-big-Filter.db                          |  Bin 24 -> 0 bytes
 .../la-1-big-Index.db                           |  Bin 75 -> 0 bytes
 .../la-1-big-Statistics.db                      |  Bin 4453 -> 0 bytes
 .../la-1-big-Summary.db                         |  Bin 75 -> 0 bytes
 .../la-1-big-TOC.txt                            |    8 -
 ...pactions_in_progress-ka-1-CompressionInfo.db |  Bin 43 -> 0 bytes
 .../system-compactions_in_progress-ka-1-Data.db |  Bin 146 -> 0 bytes
 ...tem-compactions_in_progress-ka-1-Digest.sha1 |    1 -
 ...ystem-compactions_in_progress-ka-1-Filter.db |  Bin 16 -> 0 bytes
 ...system-compactions_in_progress-ka-1-Index.db |  Bin 30 -> 0 bytes
 ...m-compactions_in_progress-ka-1-Statistics.db |  Bin 4450 -> 0 bytes
 ...stem-compactions_in_progress-ka-1-Summary.db |  Bin 116 -> 0 bytes
 .../system-compactions_in_progress-ka-1-TOC.txt |    8 -
 .../test-foo-ka-3-CompressionInfo.db            |  Bin 43 -> 0 bytes
 .../test-foo-ka-3-Data.db                       |  Bin 141 -> 0 bytes
 .../test-foo-ka-3-Digest.sha1                   |    1 -
 .../test-foo-ka-3-Filter.db                     |  Bin 176 -> 0 bytes
 .../test-foo-ka-3-Index.db                      |  Bin 90 -> 0 bytes
 .../test-foo-ka-3-Statistics.db                 |  Bin 4458 -> 0 bytes
 .../test-foo-ka-3-Summary.db                    |  Bin 80 -> 0 bytes
 .../test-foo-ka-3-TOC.txt                       |    8 -
 .../test-foo-tmp-ka-4-Data.db                   |  Bin 141 -> 0 bytes
 .../test-foo-tmp-ka-4-Index.db                  |  Bin 90 -> 0 bytes
 .../test-foo-tmplink-ka-4-Data.db               |  Bin 141 -> 0 bytes
 .../test-foo-tmplink-ka-4-Index.db              |  Bin 90 -> 0 bytes
 .../la-1-big-CompressionInfo.db                 |  Bin 43 -> 0 bytes
 .../la-1-big-Data.db                            |  Bin 93 -> 0 bytes
 .../la-1-big-Digest.adler32                     |    1 -
 .../la-1-big-Filter.db                          |  Bin 16 -> 0 bytes
 .../la-1-big-Index.db                           |  Bin 54 -> 0 bytes
 .../la-1-big-Statistics.db                      |  Bin 4442 -> 0 bytes
 .../la-1-big-Summary.db                         |  Bin 80 -> 0 bytes
 .../la-1-big-TOC.txt                            |    8 -
 .../tmp-la-2-big-Data.db                        |  Bin 93 -> 0 bytes
 .../tmp-la-2-big-Index.db                       |  Bin 54 -> 0 bytes
 .../tmp-lb-3-big-Data.db                        |  Bin 93 -> 0 bytes
 .../tmp-lb-3-big-Index.db                       |  Bin 54 -> 0 bytes
 .../tmplink-la-2-big-Data.db                    |  Bin 93 -> 0 bytes
 .../tmplink-la-2-big-Index.db                   |  Bin 54 -> 0 bytes
 .../manifest.json                               |    1 -
 .../manifest.json                               |    1 -
 .../manifest.json                               |    1 -
 .../Keyspace1-legacyleveled-ic-0-Data.db        |  Bin 530 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-0-Digest.sha1    |    1 -
 .../Keyspace1-legacyleveled-ic-0-Filter.db      |  Bin 24 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-0-Index.db       |  Bin 180 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-0-Statistics.db  |  Bin 4361 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-0-Summary.db     |  Bin 92 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-0-TOC.txt        |    7 -
 .../Keyspace1-legacyleveled-ic-1-Data.db        |  Bin 530 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-1-Digest.sha1    |    1 -
 .../Keyspace1-legacyleveled-ic-1-Filter.db      |  Bin 24 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-1-Index.db       |  Bin 180 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-1-Statistics.db  |  Bin 4361 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-1-Summary.db     |  Bin 92 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-1-TOC.txt        |    7 -
 .../Keyspace1-legacyleveled-ic-2-Data.db        |  Bin 530 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-2-Digest.sha1    |    1 -
 .../Keyspace1-legacyleveled-ic-2-Filter.db      |  Bin 24 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-2-Index.db       |  Bin 180 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-2-Statistics.db  |  Bin 4361 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-2-Summary.db     |  Bin 92 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-2-TOC.txt        |    7 -
 .../Keyspace1/legacyleveled/legacyleveled.json  |   27 -
 .../org/apache/cassandra/cql3/ViewLongTest.java |    2 +-
 .../cassandra/utils/LongBloomFilterTest.java    |   66 +-
 .../cassandra/AbstractSerializationsTester.java |    6 -
 .../apache/cassandra/batchlog/BatchTest.java    |   59 -
 .../cassandra/batchlog/BatchlogManagerTest.java |  123 +-
 .../cassandra/cache/CacheProviderTest.java      |    4 +-
 .../org/apache/cassandra/cql3/CQLTester.java    |    1 +
 .../org/apache/cassandra/db/PartitionTest.java  |   18 +-
 .../apache/cassandra/db/ReadResponseTest.java   |   99 --
 .../apache/cassandra/db/RowIndexEntryTest.java  |   43 +-
 .../unit/org/apache/cassandra/db/ScrubTest.java |   40 +-
 .../db/SinglePartitionSliceCommandTest.java     |   55 -
 .../apache/cassandra/db/SystemKeyspaceTest.java |   77 --
 .../org/apache/cassandra/db/VerifyTest.java     |    7 +-
 .../db/commitlog/CommitLogDescriptorTest.java   |   10 +-
 .../cassandra/db/commitlog/CommitLogTest.java   |   13 +-
 .../db/commitlog/CommitLogUpgradeTest.java      |   77 --
 .../db/compaction/AntiCompactionTest.java       |    4 +-
 .../db/lifecycle/RealTransactionsTest.java      |    5 +-
 .../rows/DigestBackwardCompatibilityTest.java   |  179 ---
 .../hints/LegacyHintsMigratorTest.java          |  197 ----
 .../sasi/disk/PerSSTableIndexWriterTest.java    |    4 +-
 .../CompressedRandomAccessReaderTest.java       |    8 +-
 .../CompressedSequentialWriterTest.java         |    2 +-
 .../io/sstable/BigTableWriterTest.java          |   12 +-
 .../cassandra/io/sstable/DescriptorTest.java    |   42 +-
 .../cassandra/io/sstable/IndexSummaryTest.java  |    8 +-
 .../cassandra/io/sstable/LegacySSTableTest.java |   16 +-
 .../io/sstable/SSTableRewriterTest.java         |    4 +-
 .../cassandra/io/sstable/SSTableUtils.java      |    2 +-
 .../io/sstable/SSTableWriterTestBase.java       |    4 +-
 .../sstable/format/ClientModeSSTableTest.java   |  133 ---
 .../format/SSTableFlushObserverTest.java        |    2 +-
 .../metadata/MetadataSerializerTest.java        |    9 +-
 .../cassandra/io/util/MmappedRegionsTest.java   |    2 +-
 .../schema/LegacySchemaMigratorTest.java        |  845 --------------
 .../cassandra/service/SerializationsTest.java   |    2 +-
 .../compression/CompressedInputStreamTest.java  |    5 +-
 .../org/apache/cassandra/utils/BitSetTest.java  |    9 +-
 .../apache/cassandra/utils/BloomFilterTest.java |   84 +-
 .../cassandra/utils/SerializationsTest.java     |   90 +-
 418 files changed, 805 insertions(+), 8864 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 28b7900..61844f2 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
 4.0
+ * Remove pre-3.0 compatibility code for 4.0 (CASSANDRA-12716)
  * Add column definition kind to dropped columns in schema (CASSANDRA-12705)
  * Add (automate) Nodetool Documentation (CASSANDRA-12672)
  * Update bundled cqlsh python driver to 3.7.0 (CASSANDRA-12736)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/NEWS.txt
----------------------------------------------------------------------
diff --git a/NEWS.txt b/NEWS.txt
index 631d770..d838847 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -21,6 +21,10 @@ New features
 
 Upgrading
 ---------
+    - Cassandra 4.0 removed support for any pre-3.0 format. This means you cannot upgrade from a 2.x version to 4.0
+      directly; you have to upgrade to a 3.0.x/3.x version first (and run upgradesstables). In particular, this means
+      Cassandra 4.0 cannot load or read pre-3.0 sstables in any way: you will need to upgrade those sstables on
+      3.0.x/3.x first.
     - Cassandra will no longer allow invalid keyspace replication options, such as invalid datacenter names for
      NetworkTopologyStrategy. Operators MUST add new nodes to a datacenter before they can set ALTER or
       CREATE keyspace replication policies using that datacenter. Existing keyspaces will continue to operate, 
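
A hedged sketch of what this restriction looks like at the code level (not part of this patch; it assumes the existing Descriptor.version field and Version.isCompatible() check from the sstable format API, and sstableDataFile is an illustrative variable):

    // Sketch: a 4.0 node can refuse a leftover pre-3.0 sstable up front.
    // Descriptor.version and isCompatible() are assumed from the format API.
    Descriptor desc = SSTable.tryDescriptorFromFilename(sstableDataFile);
    if (desc != null && !desc.version.isCompatible())
        throw new IllegalStateException(sstableDataFile + " is in a pre-3.0 format; "
                                        + "run upgradesstables on 3.0.x/3.x before upgrading to 4.0");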

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/auth/CassandraRoleManager.java b/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
index 7b55ac9..d371df3 100644
--- a/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
+++ b/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
@@ -375,16 +375,6 @@ public class CassandraRoleManager implements IRoleManager
         {
             public void run()
             {
-                // If not all nodes are on 2.2, we don't want to initialize the role manager as this will confuse 2.1
-                // nodes (see CASSANDRA-9761 for details). So we re-schedule the setup for later, hoping that the upgrade
-                // will be finished by then.
-                if (!MessagingService.instance().areAllNodesAtLeast22())
-                {
-                    logger.trace("Not all nodes are upgraded to a version that supports Roles yet, rescheduling setup task");
-                    scheduleSetupTask(setupTask);
-                    return;
-                }
-
                 isClusterReady = true;
                 try
                 {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/batchlog/LegacyBatchlogMigrator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/batchlog/LegacyBatchlogMigrator.java b/src/java/org/apache/cassandra/batchlog/LegacyBatchlogMigrator.java
deleted file mode 100644
index 4592488..0000000
--- a/src/java/org/apache/cassandra/batchlog/LegacyBatchlogMigrator.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.batchlog;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.nio.ByteBuffer;
-import java.util.*;
-import java.util.concurrent.TimeUnit;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.cassandra.config.SchemaConstants;
-import org.apache.cassandra.cql3.QueryProcessor;
-import org.apache.cassandra.cql3.UntypedResultSet;
-import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.marshal.UUIDType;
-import org.apache.cassandra.db.partitions.PartitionUpdate;
-import org.apache.cassandra.exceptions.WriteFailureException;
-import org.apache.cassandra.exceptions.WriteTimeoutException;
-import org.apache.cassandra.io.util.DataInputBuffer;
-import org.apache.cassandra.io.util.DataOutputBuffer;
-import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.service.AbstractWriteResponseHandler;
-import org.apache.cassandra.service.WriteResponseHandler;
-import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
-
-public final class LegacyBatchlogMigrator
-{
-    private static final Logger logger = LoggerFactory.getLogger(LegacyBatchlogMigrator.class);
-
-    private LegacyBatchlogMigrator()
-    {
-        // static class
-    }
-
-    @SuppressWarnings("deprecation")
-    public static void migrate()
-    {
-        ColumnFamilyStore store = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.LEGACY_BATCHLOG);
-
-        // nothing to migrate
-        if (store.isEmpty())
-            return;
-
-        logger.info("Migrating legacy batchlog to new storage");
-
-        int convertedBatches = 0;
-        String query = String.format("SELECT id, data, written_at, version FROM %s.%s",
-                                     SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                                     SystemKeyspace.LEGACY_BATCHLOG);
-
-        int pageSize = BatchlogManager.calculatePageSize(store);
-
-        UntypedResultSet rows = QueryProcessor.executeInternalWithPaging(query, pageSize);
-        for (UntypedResultSet.Row row : rows)
-        {
-            if (apply(row, convertedBatches))
-                convertedBatches++;
-        }
-
-        if (convertedBatches > 0)
-            Keyspace.openAndGetStore(SystemKeyspace.LegacyBatchlog).truncateBlocking();
-    }
-
-    @SuppressWarnings("deprecation")
-    public static boolean isLegacyBatchlogMutation(Mutation mutation)
-    {
-        return mutation.getKeyspaceName().equals(SchemaConstants.SYSTEM_KEYSPACE_NAME)
-            && mutation.getPartitionUpdate(SystemKeyspace.LegacyBatchlog.cfId) != null;
-    }
-
-    @SuppressWarnings("deprecation")
-    public static void handleLegacyMutation(Mutation mutation)
-    {
-        PartitionUpdate update = mutation.getPartitionUpdate(SystemKeyspace.LegacyBatchlog.cfId);
-        logger.trace("Applying legacy batchlog mutation {}", update);
-        update.forEach(row -> apply(UntypedResultSet.Row.fromInternalRow(update.metadata(), update.partitionKey(), row), -1));
-    }
-
-    private static boolean apply(UntypedResultSet.Row row, long counter)
-    {
-        UUID id = row.getUUID("id");
-        long timestamp = id.version() == 1 ? UUIDGen.unixTimestamp(id) : row.getLong("written_at");
-        int version = row.has("version") ? row.getInt("version") : MessagingService.VERSION_12;
-
-        if (id.version() != 1)
-            id = UUIDGen.getTimeUUID(timestamp, counter);
-
-        logger.trace("Converting mutation at {}", timestamp);
-
-        try (DataInputBuffer in = new DataInputBuffer(row.getBytes("data"), false))
-        {
-            int numMutations = in.readInt();
-            List<Mutation> mutations = new ArrayList<>(numMutations);
-            for (int i = 0; i < numMutations; i++)
-                mutations.add(Mutation.serializer.deserialize(in, version));
-
-            BatchlogManager.store(Batch.createLocal(id, TimeUnit.MILLISECONDS.toMicros(timestamp), mutations));
-            return true;
-        }
-        catch (Throwable t)
-        {
-            logger.error("Failed to convert mutation {} at timestamp {}", id, timestamp, t);
-            return false;
-        }
-    }
-
-    public static void syncWriteToBatchlog(WriteResponseHandler<?> handler, Batch batch, Collection<InetAddress> endpoints)
-    throws WriteTimeoutException, WriteFailureException
-    {
-        for (InetAddress target : endpoints)
-        {
-            logger.trace("Sending legacy batchlog store request {} to {} for {} mutations", batch.id, target, batch.size());
-
-            int targetVersion = MessagingService.instance().getVersion(target);
-            MessagingService.instance().sendRR(getStoreMutation(batch, targetVersion).createMessage(MessagingService.Verb.MUTATION),
-                                               target,
-                                               handler,
-                                               false);
-        }
-    }
-
-    public static void asyncRemoveFromBatchlog(Collection<InetAddress> endpoints, UUID uuid, long queryStartNanoTime)
-    {
-        AbstractWriteResponseHandler<IMutation> handler = new WriteResponseHandler<>(endpoints,
-                                                                                     Collections.<InetAddress>emptyList(),
-                                                                                     ConsistencyLevel.ANY,
-                                                                                     Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME),
-                                                                                     null,
-                                                                                     WriteType.SIMPLE,
-                                                                                     queryStartNanoTime);
-        Mutation mutation = getRemoveMutation(uuid);
-
-        for (InetAddress target : endpoints)
-        {
-            logger.trace("Sending legacy batchlog remove request {} to {}", uuid, target);
-            MessagingService.instance().sendRR(mutation.createMessage(MessagingService.Verb.MUTATION), target, handler, false);
-        }
-    }
-
-    static void store(Batch batch, int version)
-    {
-        getStoreMutation(batch, version).apply();
-    }
-
-    @SuppressWarnings("deprecation")
-    static Mutation getStoreMutation(Batch batch, int version)
-    {
-        PartitionUpdate.SimpleBuilder builder = PartitionUpdate.simpleBuilder(SystemKeyspace.LegacyBatchlog, batch.id);
-        builder.row()
-               .timestamp(batch.creationTime)
-               .add("written_at", new Date(batch.creationTime / 1000))
-               .add("data", getSerializedMutations(version, batch.decodedMutations))
-               .add("version", version);
-        return builder.buildAsMutation();
-    }
-
-    @SuppressWarnings("deprecation")
-    private static Mutation getRemoveMutation(UUID uuid)
-    {
-        return new Mutation(PartitionUpdate.fullPartitionDelete(SystemKeyspace.LegacyBatchlog,
-                                                                UUIDType.instance.decompose(uuid),
-                                                                FBUtilities.timestampMicros(),
-                                                                FBUtilities.nowInSeconds()));
-    }
-
-    private static ByteBuffer getSerializedMutations(int version, Collection<Mutation> mutations)
-    {
-        try (DataOutputBuffer buf = new DataOutputBuffer())
-        {
-            buf.writeInt(mutations.size());
-            for (Mutation mutation : mutations)
-                Mutation.serializer.serialize(mutation, buf, version);
-            return buf.buffer();
-        }
-        catch (IOException e)
-        {
-            throw new RuntimeException(e);
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/config/CFMetaData.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/config/CFMetaData.java b/src/java/org/apache/cassandra/config/CFMetaData.java
index a60700c..8f11089 100644
--- a/src/java/org/apache/cassandra/config/CFMetaData.java
+++ b/src/java/org/apache/cassandra/config/CFMetaData.java
@@ -94,8 +94,6 @@ public final class CFMetaData
     public volatile ClusteringComparator comparator;  // bytes, long, timeuuid, utf8, etc. This is built directly from clusteringColumns
     public final IPartitioner partitioner;            // partitioner the table uses
 
-    private final Serializers serializers;
-
     // non-final, for now
     public volatile TableParams params = TableParams.DEFAULT;
 
@@ -303,7 +301,6 @@ public final class CFMetaData
         rebuild();
 
         this.resource = DataResource.table(ksName, cfName);
-        this.serializers = new Serializers(this);
     }
 
     // This rebuild informations that are intrinsically duplicate of the table definition but
@@ -1115,11 +1112,6 @@ public final class CFMetaData
         return isView;
     }
 
-    public Serializers serializers()
-    {
-        return serializers;
-    }
-
     public AbstractType<?> makeLegacyDefaultValidator()
     {
         return isCounter()

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java b/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java
index 53ac68c..7b034ea 100644
--- a/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java
+++ b/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java
@@ -556,9 +556,6 @@ public final class StatementRestrictions
                                                VariableSpecifications boundNames,
                                                SecondaryIndexManager indexManager)
     {
-        if (!MessagingService.instance().areAllNodesAtLeast30())
-            throw new InvalidRequestException("Please upgrade all nodes to at least 3.0 before using custom index expressions");
-
         if (expressions.size() > 1)
             throw new InvalidRequestException(IndexRestrictions.MULTIPLE_EXPRESSIONS);
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
index f46e6f7..2234d79 100644
--- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
+++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
@@ -747,8 +747,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
                                                descriptor.ksname,
                                                descriptor.cfname,
                                                fileIndexGenerator.incrementAndGet(),
-                                               descriptor.formatType,
-                                               descriptor.digestComponent);
+                                               descriptor.formatType);
             }
             while (new File(newDescriptor.filenameFor(Component.DATA)).exists());
 
@@ -815,26 +814,24 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
         return name;
     }
 
-    public String getSSTablePath(File directory)
+    public Descriptor newSSTableDescriptor(File directory)
     {
-        return getSSTablePath(directory, SSTableFormat.Type.current().info.getLatestVersion(), SSTableFormat.Type.current());
+        return newSSTableDescriptor(directory, SSTableFormat.Type.current().info.getLatestVersion(), SSTableFormat.Type.current());
     }
 
-    public String getSSTablePath(File directory, SSTableFormat.Type format)
+    public Descriptor newSSTableDescriptor(File directory, SSTableFormat.Type format)
     {
-        return getSSTablePath(directory, format.info.getLatestVersion(), format);
+        return newSSTableDescriptor(directory, format.info.getLatestVersion(), format);
     }
 
-    private String getSSTablePath(File directory, Version version, SSTableFormat.Type format)
+    private Descriptor newSSTableDescriptor(File directory, Version version, SSTableFormat.Type format)
     {
-        Descriptor desc = new Descriptor(version,
-                                         directory,
-                                         keyspace.getName(),
-                                         name,
-                                         fileIndexGenerator.incrementAndGet(),
-                                         format,
-                                         Component.digestFor(BigFormat.latestVersion.uncompressedChecksumType()));
-        return desc.filenameFor(Component.DATA);
+        return new Descriptor(version,
+                              directory,
+                              keyspace.getName(),
+                              name,
+                              fileIndexGenerator.incrementAndGet(),
+                              format);
     }
 
     /**
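
A minimal sketch of how call sites adapt to this rename (illustrative; cfs and directory are assumed variables, and filenameFor(Component.DATA) is exactly what the removed getSSTablePath applied internally):

    // Before: String path = cfs.getSSTablePath(directory);
    // After: obtain the Descriptor and derive component paths as needed.
    Descriptor desc = cfs.newSSTableDescriptor(directory);
    String dataPath = desc.filenameFor(Component.DATA);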

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/Directories.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Directories.java b/src/java/org/apache/cassandra/db/Directories.java
index e0e1c08..2bb4784 100644
--- a/src/java/org/apache/cassandra/db/Directories.java
+++ b/src/java/org/apache/cassandra/db/Directories.java
@@ -260,9 +260,8 @@ public class Directories
                         if (file.isDirectory())
                             return false;
 
-                        Pair<Descriptor, Component> pair = SSTable.tryComponentFromFilename(file.getParentFile(),
-                                                                                            file.getName());
-                        return pair != null && pair.left.ksname.equals(metadata.ksName) && pair.left.cfname.equals(metadata.cfName);
+                        Descriptor desc = SSTable.tryDescriptorFromFilename(file);
+                        return desc != null && desc.ksname.equals(metadata.ksName) && desc.cfname.equals(metadata.cfName);
 
                     }
                 });
@@ -308,8 +307,9 @@ public class Directories
     {
         for (File dir : dataPaths)
         {
-            if (new File(dir, filename).exists())
-                return Descriptor.fromFilename(dir, filename).left;
+            File file = new File(dir, filename);
+            if (file.exists())
+                return Descriptor.fromFilename(file);
         }
         return null;
     }
@@ -755,7 +755,7 @@ public class Directories
                             return false;
 
                     case FINAL:
-                        Pair<Descriptor, Component> pair = SSTable.tryComponentFromFilename(file.getParentFile(), file.getName());
+                        Pair<Descriptor, Component> pair = SSTable.tryComponentFromFilename(file);
                         if (pair == null)
                             return false;
 
@@ -769,24 +769,6 @@ public class Directories
                             previous = new HashSet<>();
                             components.put(pair.left, previous);
                         }
-                        else if (pair.right.type == Component.Type.DIGEST)
-                        {
-                            if (pair.right != pair.left.digestComponent)
-                            {
-                                // Need to update the DIGEST component as it might be set to another
-                                // digest type as a guess. This may happen if the first component is
-                                // not the DIGEST (but the Data component for example), so the digest
-                                // type is _guessed_ from the Version.
-                                // Although the Version explicitly defines the digest type, it doesn't
-                                // seem to be true under all circumstances. Generated sstables from a
-                                // post 2.1.8 snapshot produced Digest.sha1 files although Version
-                                // defines Adler32.
-                                // TL;DR this piece of code updates the digest component to be "correct".
-                                components.remove(pair.left);
-                                Descriptor updated = pair.left.withDigestComponent(pair.right);
-                                components.put(updated, previous);
-                            }
-                        }
                         previous.add(pair.right);
                         nbFiles++;
                         return false;
@@ -1043,11 +1025,11 @@ public class Directories
         public boolean isAcceptable(Path path)
         {
             File file = path.toFile();
-            Pair<Descriptor, Component> pair = SSTable.tryComponentFromFilename(path.getParent().toFile(), file.getName());
-            return pair != null
-                    && pair.left.ksname.equals(metadata.ksName)
-                    && pair.left.cfname.equals(metadata.cfName)
-                    && !toSkip.contains(file);
+            Descriptor desc = SSTable.tryDescriptorFromFilename(file);
+            return desc != null
+                && desc.ksname.equals(metadata.ksName)
+                && desc.cfname.equals(metadata.cfName)
+                && !toSkip.contains(file);
         }
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/LegacyLayout.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/LegacyLayout.java b/src/java/org/apache/cassandra/db/LegacyLayout.java
index ab62a0e..ad0f1b7 100644
--- a/src/java/org/apache/cassandra/db/LegacyLayout.java
+++ b/src/java/org/apache/cassandra/db/LegacyLayout.java
@@ -17,33 +17,24 @@
  */
 package org.apache.cassandra.db;
 
-import java.io.DataInput;
 import java.io.IOException;
-import java.io.IOError;
 import java.nio.ByteBuffer;
 import java.security.MessageDigest;
 import java.util.*;
 
-import org.apache.cassandra.config.SchemaConstants;
 import org.apache.cassandra.utils.AbstractIterator;
 import com.google.common.collect.Iterators;
-import com.google.common.collect.Lists;
 import com.google.common.collect.PeekingIterator;
 
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.db.filter.ColumnFilter;
-import org.apache.cassandra.db.filter.DataLimits;
 import org.apache.cassandra.db.rows.*;
-import org.apache.cassandra.db.partitions.*;
 import org.apache.cassandra.db.context.CounterContext;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
-import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.utils.*;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
 
@@ -52,18 +43,8 @@ import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
  */
 public abstract class LegacyLayout
 {
-    private static final Logger logger = LoggerFactory.getLogger(LegacyLayout.class);
-
     public final static int MAX_CELL_NAME_LENGTH = FBUtilities.MAX_UNSIGNED_SHORT;
 
-    public final static int STATIC_PREFIX = 0xFFFF;
-
-    public final static int DELETION_MASK        = 0x01;
-    public final static int EXPIRATION_MASK      = 0x02;
-    public final static int COUNTER_MASK         = 0x04;
-    public final static int COUNTER_UPDATE_MASK  = 0x08;
-    private final static int RANGE_TOMBSTONE_MASK = 0x10;
-
     private LegacyLayout() {}
 
     public static AbstractType<?> makeLegacyComparator(CFMetaData metadata)
@@ -135,7 +116,7 @@ public abstract class LegacyLayout
         return decodeCellName(metadata, cellname, false);
     }
 
-    public static LegacyCellName decodeCellName(CFMetaData metadata, ByteBuffer cellname, boolean readAllAsDynamic) throws UnknownColumnException
+    private static LegacyCellName decodeCellName(CFMetaData metadata, ByteBuffer cellname, boolean readAllAsDynamic) throws UnknownColumnException
     {
         Clustering clustering = decodeClustering(metadata, cellname);
 
@@ -233,30 +214,6 @@ public abstract class LegacyLayout
         return new LegacyBound(cb, metadata.isCompound() && CompositeType.isStaticName(bound), collectionName);
     }
 
-    public static ByteBuffer encodeBound(CFMetaData metadata, ClusteringBound bound, boolean isStart)
-    {
-        if (bound == ClusteringBound.BOTTOM || bound == ClusteringBound.TOP || metadata.comparator.size() == 0)
-            return ByteBufferUtil.EMPTY_BYTE_BUFFER;
-
-        ClusteringPrefix clustering = bound.clustering();
-
-        if (!metadata.isCompound())
-        {
-            assert clustering.size() == 1;
-            return clustering.get(0);
-        }
-
-        CompositeType ctype = CompositeType.getInstance(metadata.comparator.subtypes());
-        CompositeType.Builder builder = ctype.builder();
-        for (int i = 0; i < clustering.size(); i++)
-            builder.add(clustering.get(i));
-
-        if (isStart)
-            return bound.isInclusive() ? builder.build() : builder.buildAsEndOfRange();
-        else
-            return bound.isInclusive() ? builder.buildAsEndOfRange() : builder.build();
-    }
-
     public static ByteBuffer encodeCellName(CFMetaData metadata, ClusteringPrefix clustering, ByteBuffer columnName, ByteBuffer collectionElement)
     {
         boolean isStatic = clustering == Clustering.STATIC_CLUSTERING;
@@ -330,213 +287,6 @@ public abstract class LegacyLayout
         return Clustering.make(components.subList(0, Math.min(csize, components.size())).toArray(new ByteBuffer[csize]));
     }
 
-    public static ByteBuffer encodeClustering(CFMetaData metadata, ClusteringPrefix clustering)
-    {
-        if (clustering.size() == 0)
-            return ByteBufferUtil.EMPTY_BYTE_BUFFER;
-
-        if (!metadata.isCompound())
-        {
-            assert clustering.size() == 1;
-            return clustering.get(0);
-        }
-
-        ByteBuffer[] values = new ByteBuffer[clustering.size()];
-        for (int i = 0; i < clustering.size(); i++)
-            values[i] = clustering.get(i);
-        return CompositeType.build(values);
-    }
-
-    /**
-     * The maximum number of cells to include per partition when converting to the old format.
-     * <p>
-     * We already apply the limit during the actual query, but for queries that count cells and not rows (thrift queries
-     * and distinct queries as far as old nodes are concerned), we may still include a little more than requested
-     * because {@link DataLimits} always includes full rows. So if the limit ends in the middle of a queried row, the
-     * full row will be part of our result. This would confuse old nodes, however, so we make sure to truncate it to
-     * what's expected before writing it on the wire.
-     *
-     * @param command the read command for which to determine the maximum cells per partition. This can be {@code null},
-     * in which case {@code Integer.MAX_VALUE} is returned.
-     * @return the maximum number of cells per partition that should be enforced according to the read command if
-     * post-query limitations are in order (see above). This will be {@code Integer.MAX_VALUE} if no such limits are
-     * necessary.
-     */
-    private static int maxCellsPerPartition(ReadCommand command)
-    {
-        if (command == null)
-            return Integer.MAX_VALUE;
-
-        DataLimits limits = command.limits();
-
-        // There are 2 types of DISTINCT queries: those that include only the partition key, and those that include static columns.
-        // On old nodes, the latter expects the first row in terms of CQL count, which is what we already have and there is no additional
-        // limit to apply. The former, however, expects only one cell per partition and relies on it (see CASSANDRA-10762).
-        if (limits.isDistinct())
-            return command.columnFilter().fetchedColumns().statics.isEmpty() ? 1 : Integer.MAX_VALUE;
-
-        switch (limits.kind())
-        {
-            case THRIFT_LIMIT:
-            case SUPER_COLUMN_COUNTING_LIMIT:
-                return limits.perPartitionCount();
-            default:
-                return Integer.MAX_VALUE;
-        }
-    }
-
-    // For serializing to old wire format
-    public static LegacyUnfilteredPartition fromUnfilteredRowIterator(ReadCommand command, UnfilteredRowIterator iterator)
-    {
-        // we need to extract the range tombstones, so materialize the partition. Since this is
-        // used for the on-wire format, this is no worse than it used to be.
-        final ImmutableBTreePartition partition = ImmutableBTreePartition.create(iterator);
-        DeletionInfo info = partition.deletionInfo();
-        Pair<LegacyRangeTombstoneList, Iterator<LegacyCell>> pair = fromRowIterator(partition.metadata(), partition.iterator(), partition.staticRow());
-
-        LegacyLayout.LegacyRangeTombstoneList rtl = pair.left;
-
-        // Processing the cell iterator results in the LegacyRangeTombstoneList being populated, so we do this
-        // before we use the LegacyRangeTombstoneList at all
-        List<LegacyLayout.LegacyCell> cells = Lists.newArrayList(pair.right);
-
-        int maxCellsPerPartition = maxCellsPerPartition(command);
-        if (cells.size() > maxCellsPerPartition)
-            cells = cells.subList(0, maxCellsPerPartition);
-
-        // The LegacyRangeTombstoneList already has range tombstones for the single-row deletions and complex
-        // deletions.  Go through our normal range tombstones and add them to the LegacyRTL so that the range
-        // tombstones all get merged and sorted properly.
-        if (info.hasRanges())
-        {
-            Iterator<RangeTombstone> rangeTombstoneIterator = info.rangeIterator(false);
-            while (rangeTombstoneIterator.hasNext())
-            {
-                RangeTombstone rt = rangeTombstoneIterator.next();
-                Slice slice = rt.deletedSlice();
-                LegacyLayout.LegacyBound start = new LegacyLayout.LegacyBound(slice.start(), false, null);
-                LegacyLayout.LegacyBound end = new LegacyLayout.LegacyBound(slice.end(), false, null);
-                rtl.add(start, end, rt.deletionTime().markedForDeleteAt(), rt.deletionTime().localDeletionTime());
-            }
-        }
-
-        return new LegacyUnfilteredPartition(info.getPartitionDeletion(), rtl, cells);
-    }
-
-    public static void serializeAsLegacyPartition(ReadCommand command, UnfilteredRowIterator partition, DataOutputPlus out, int version) throws IOException
-    {
-        assert version < MessagingService.VERSION_30;
-
-        out.writeBoolean(true);
-
-        LegacyLayout.LegacyUnfilteredPartition legacyPartition = LegacyLayout.fromUnfilteredRowIterator(command, partition);
-
-        UUIDSerializer.serializer.serialize(partition.metadata().cfId, out, version);
-        DeletionTime.serializer.serialize(legacyPartition.partitionDeletion, out);
-
-        legacyPartition.rangeTombstones.serialize(out, partition.metadata());
-
-        // begin cell serialization
-        out.writeInt(legacyPartition.cells.size());
-        for (LegacyLayout.LegacyCell cell : legacyPartition.cells)
-        {
-            ByteBufferUtil.writeWithShortLength(cell.name.encode(partition.metadata()), out);
-            out.writeByte(cell.serializationFlags());
-            if (cell.isExpiring())
-            {
-                out.writeInt(cell.ttl);
-                out.writeInt(cell.localDeletionTime);
-            }
-            else if (cell.isTombstone())
-            {
-                out.writeLong(cell.timestamp);
-                out.writeInt(TypeSizes.sizeof(cell.localDeletionTime));
-                out.writeInt(cell.localDeletionTime);
-                continue;
-            }
-            else if (cell.isCounterUpdate())
-            {
-                out.writeLong(cell.timestamp);
-                long count = CounterContext.instance().getLocalCount(cell.value);
-                ByteBufferUtil.writeWithLength(ByteBufferUtil.bytes(count), out);
-                continue;
-            }
-            else if (cell.isCounter())
-            {
-                out.writeLong(Long.MIN_VALUE);  // timestampOfLastDelete (not used, and MIN_VALUE is the default)
-            }
-
-            out.writeLong(cell.timestamp);
-            ByteBufferUtil.writeWithLength(cell.value, out);
-        }
-    }
-
-    // For the old wire format
-    // Note: this can return null if an empty partition is serialized!
-    public static UnfilteredRowIterator deserializeLegacyPartition(DataInputPlus in, int version, SerializationHelper.Flag flag, ByteBuffer key) throws IOException
-    {
-        assert version < MessagingService.VERSION_30;
-
-        // This is only used in mutations, and mutations have never allowed "null" column families
-        boolean present = in.readBoolean();
-        if (!present)
-            return null;
-
-        CFMetaData metadata = CFMetaData.serializer.deserialize(in, version);
-        LegacyDeletionInfo info = LegacyDeletionInfo.deserialize(metadata, in);
-        int size = in.readInt();
-        Iterator<LegacyCell> cells = deserializeCells(metadata, in, flag, size);
-        SerializationHelper helper = new SerializationHelper(metadata, version, flag);
-        return onWireCellstoUnfilteredRowIterator(metadata, metadata.partitioner.decorateKey(key), info, cells, false, helper);
-    }
-
-    // For the old wire format
-    public static long serializedSizeAsLegacyPartition(ReadCommand command, UnfilteredRowIterator partition, int version)
-    {
-        assert version < MessagingService.VERSION_30;
-
-        if (partition.isEmpty())
-            return TypeSizes.sizeof(false);
-
-        long size = TypeSizes.sizeof(true);
-
-        LegacyLayout.LegacyUnfilteredPartition legacyPartition = LegacyLayout.fromUnfilteredRowIterator(command, partition);
-
-        size += UUIDSerializer.serializer.serializedSize(partition.metadata().cfId, version);
-        size += DeletionTime.serializer.serializedSize(legacyPartition.partitionDeletion);
-        size += legacyPartition.rangeTombstones.serializedSize(partition.metadata());
-
-        // begin cell serialization
-        size += TypeSizes.sizeof(legacyPartition.cells.size());
-        for (LegacyLayout.LegacyCell cell : legacyPartition.cells)
-        {
-            size += ByteBufferUtil.serializedSizeWithShortLength(cell.name.encode(partition.metadata()));
-            size += 1;  // serialization flags
-            if (cell.kind == LegacyLayout.LegacyCell.Kind.EXPIRING)
-            {
-                size += TypeSizes.sizeof(cell.ttl);
-                size += TypeSizes.sizeof(cell.localDeletionTime);
-            }
-            else if (cell.kind == LegacyLayout.LegacyCell.Kind.DELETED)
-            {
-                size += TypeSizes.sizeof(cell.timestamp);
-                // localDeletionTime replaces cell.value as the body
-                size += TypeSizes.sizeof(TypeSizes.sizeof(cell.localDeletionTime));
-                size += TypeSizes.sizeof(cell.localDeletionTime);
-                continue;
-            }
-            else if (cell.kind == LegacyLayout.LegacyCell.Kind.COUNTER)
-            {
-                size += TypeSizes.sizeof(Long.MIN_VALUE);  // timestampOfLastDelete
-            }
-
-            size += TypeSizes.sizeof(cell.timestamp);
-            size += ByteBufferUtil.serializedSizeWithLength(cell.value);
-        }
-
-        return size;
-    }
-
     // For thrift sake
     public static UnfilteredRowIterator toUnfilteredRowIterator(CFMetaData metadata,
                                                                 DecoratedKey key,
@@ -547,32 +297,6 @@ public abstract class LegacyLayout
         return toUnfilteredRowIterator(metadata, key, delInfo, cells, false, helper);
     }
 
-    // For deserializing old wire format
-    public static UnfilteredRowIterator onWireCellstoUnfilteredRowIterator(CFMetaData metadata,
-                                                                           DecoratedKey key,
-                                                                           LegacyDeletionInfo delInfo,
-                                                                           Iterator<LegacyCell> cells,
-                                                                           boolean reversed,
-                                                                           SerializationHelper helper)
-    {
-
-        // If the table is a static compact table, the "column_metadata" are now internally encoded as
-        // static. This has already been recognized by decodeCellName, but it means the cells
-        // provided are not in the expected order (the "static" cells are not necessarily at the front).
-        // So sort them to make sure toUnfilteredRowIterator works as expected.
-        // Further, if the query is reversed, then the on-wire format still has cells in non-reversed
-        // order, but we need to have them reversed in the final UnfilteredRowIterator. So reverse them.
-        if (metadata.isStaticCompactTable() || reversed)
-        {
-            List<LegacyCell> l = new ArrayList<>();
-            Iterators.addAll(l, cells);
-            Collections.sort(l, legacyCellComparator(metadata, reversed));
-            cells = l.iterator();
-        }
-
-        return toUnfilteredRowIterator(metadata, key, delInfo, cells, reversed, helper);
-    }
-
     private static UnfilteredRowIterator toUnfilteredRowIterator(CFMetaData metadata,
                                                                  DecoratedKey key,
                                                                  LegacyDeletionInfo delInfo,
@@ -624,47 +348,6 @@ public abstract class LegacyLayout
                                                true);
     }
 
-    public static Row extractStaticColumns(CFMetaData metadata, DataInputPlus in, Columns statics) throws IOException
-    {
-        assert !statics.isEmpty();
-        assert metadata.isCompactTable();
-
-        if (metadata.isSuper())
-            // TODO: there is in practice nothing to do here, but we need to handle the column_metadata for super columns somewhere else
-            throw new UnsupportedOperationException();
-
-        Set<ByteBuffer> columnsToFetch = new HashSet<>(statics.size());
-        for (ColumnDefinition column : statics)
-            columnsToFetch.add(column.name.bytes);
-
-        Row.Builder builder = BTreeRow.unsortedBuilder(FBUtilities.nowInSeconds());
-        builder.newRow(Clustering.STATIC_CLUSTERING);
-
-        boolean foundOne = false;
-        LegacyAtom atom;
-        while ((atom = readLegacyAtom(metadata, in, false)) != null)
-        {
-            if (atom.isCell())
-            {
-                LegacyCell cell = atom.asCell();
-                if (!columnsToFetch.contains(cell.name.encode(metadata)))
-                    continue;
-
-                foundOne = true;
-                builder.addCell(new BufferCell(cell.name.column, cell.timestamp, cell.ttl, cell.localDeletionTime, cell.value, null));
-            }
-            else
-            {
-                LegacyRangeTombstone tombstone = atom.asRangeTombstone();
-                // TODO: we need to track tombstones and potentially ignore cells that are
-                // shadowed (or even better, replace them by tombstones).
-                throw new UnsupportedOperationException();
-            }
-        }
-
-        return foundOne ? builder.build() : Rows.EMPTY_STATIC_ROW;
-    }
-
     private static Row getNextRow(CellGrouper grouper, PeekingIterator<? extends LegacyAtom> cells)
     {
         if (!cells.hasNext())
@@ -714,7 +397,7 @@ public abstract class LegacyLayout
             private Iterator<LegacyCell> initializeRow()
             {
                 if (staticRow == null || staticRow.isEmpty())
-                    return Collections.<LegacyLayout.LegacyCell>emptyIterator();
+                    return Collections.emptyIterator();
 
                 Pair<LegacyRangeTombstoneList, Iterator<LegacyCell>> row = fromRow(metadata, staticRow);
                 deletions.addAll(row.left);
@@ -843,7 +526,7 @@ public abstract class LegacyLayout
         return legacyCellComparator(metadata, false);
     }
 
-    public static Comparator<LegacyCell> legacyCellComparator(final CFMetaData metadata, final boolean reversed)
+    private static Comparator<LegacyCell> legacyCellComparator(final CFMetaData metadata, final boolean reversed)
     {
         final Comparator<LegacyCellName> cellNameComparator = legacyCellNameComparator(metadata, reversed);
         return new Comparator<LegacyCell>()
@@ -1013,121 +696,7 @@ public abstract class LegacyLayout
         };
     }
 
-    public static LegacyAtom readLegacyAtom(CFMetaData metadata, DataInputPlus in, boolean readAllAsDynamic) throws IOException
-    {
-        while (true)
-        {
-            ByteBuffer cellname = ByteBufferUtil.readWithShortLength(in);
-            if (!cellname.hasRemaining())
-                return null; // END_OF_ROW
-
-            try
-            {
-                int b = in.readUnsignedByte();
-                return (b & RANGE_TOMBSTONE_MASK) != 0
-                    ? readLegacyRangeTombstoneBody(metadata, in, cellname)
-                    : readLegacyCellBody(metadata, in, cellname, b, SerializationHelper.Flag.LOCAL, readAllAsDynamic);
-            }
-            catch (UnknownColumnException e)
-            {
-                // We can get here if we read a cell for a dropped column, and if that is the case,
-                // then simply ignoring the cell is fine. Note also that we skip the check for the
-                // system keyspace, because for those tables we actually remove columns without
-                // registering them in the dropped columns
-                assert metadata.ksName.equals(SchemaConstants.SYSTEM_KEYSPACE_NAME) || metadata.getDroppedColumnDefinition(e.columnName) != null : e.getMessage();
-            }
-        }
-    }
-
-    public static LegacyCell readLegacyCell(CFMetaData metadata, DataInput in, SerializationHelper.Flag flag) throws IOException, UnknownColumnException
-    {
-        ByteBuffer cellname = ByteBufferUtil.readWithShortLength(in);
-        int b = in.readUnsignedByte();
-        return readLegacyCellBody(metadata, in, cellname, b, flag, false);
-    }
-
-    public static LegacyCell readLegacyCellBody(CFMetaData metadata, DataInput in, ByteBuffer cellname, int mask, SerializationHelper.Flag flag, boolean readAllAsDynamic)
-    throws IOException, UnknownColumnException
-    {
-        // Note that we want to call decodeCellName only after we've deserialized other parts, since it can throw
-        // and we want to throw only after having deserialized the full cell.
-        if ((mask & COUNTER_MASK) != 0)
-        {
-            in.readLong(); // timestampOfLastDelete: this has been unused for a long time so we ignore it
-            long ts = in.readLong();
-            ByteBuffer value = ByteBufferUtil.readWithLength(in);
-            if (flag == SerializationHelper.Flag.FROM_REMOTE || (flag == SerializationHelper.Flag.LOCAL && CounterContext.instance().shouldClearLocal(value)))
-                value = CounterContext.instance().clearAllLocal(value);
-            return new LegacyCell(LegacyCell.Kind.COUNTER, decodeCellName(metadata, cellname, readAllAsDynamic), value, ts, Cell.NO_DELETION_TIME, Cell.NO_TTL);
-        }
-        else if ((mask & EXPIRATION_MASK) != 0)
-        {
-            int ttl = in.readInt();
-            int expiration = in.readInt();
-            long ts = in.readLong();
-            ByteBuffer value = ByteBufferUtil.readWithLength(in);
-            return new LegacyCell(LegacyCell.Kind.EXPIRING, decodeCellName(metadata, cellname, readAllAsDynamic), value, ts, expiration, ttl);
-        }
-        else
-        {
-            long ts = in.readLong();
-            ByteBuffer value = ByteBufferUtil.readWithLength(in);
-            LegacyCellName name = decodeCellName(metadata, cellname, readAllAsDynamic);
-            return (mask & COUNTER_UPDATE_MASK) != 0
-                ? new LegacyCell(LegacyCell.Kind.COUNTER, name, CounterContext.instance().createLocal(ByteBufferUtil.toLong(value)), ts, Cell.NO_DELETION_TIME, Cell.NO_TTL)
-                : ((mask & DELETION_MASK) == 0
-                        ? new LegacyCell(LegacyCell.Kind.REGULAR, name, value, ts, Cell.NO_DELETION_TIME, Cell.NO_TTL)
-                        : new LegacyCell(LegacyCell.Kind.DELETED, name, ByteBufferUtil.EMPTY_BYTE_BUFFER, ts, ByteBufferUtil.toInt(value), Cell.NO_TTL));
-        }
-    }
-
-    public static LegacyRangeTombstone readLegacyRangeTombstoneBody(CFMetaData metadata, DataInputPlus in, ByteBuffer boundname) throws IOException
-    {
-        LegacyBound min = decodeBound(metadata, boundname, true);
-        LegacyBound max = decodeBound(metadata, ByteBufferUtil.readWithShortLength(in), false);
-        DeletionTime dt = DeletionTime.serializer.deserialize(in);
-        return new LegacyRangeTombstone(min, max, dt);
-    }
-
-    public static Iterator<LegacyCell> deserializeCells(final CFMetaData metadata,
-                                                        final DataInput in,
-                                                        final SerializationHelper.Flag flag,
-                                                        final int size)
-    {
-        return new AbstractIterator<LegacyCell>()
-        {
-            private int i = 0;
-
-            protected LegacyCell computeNext()
-            {
-                if (i >= size)
-                    return endOfData();
-
-                ++i;
-                try
-                {
-                    return readLegacyCell(metadata, in, flag);
-                }
-                catch (UnknownColumnException e)
-                {
-                    // We can get here if we read a cell for a dropped column, and if that is the case,
-                    // then simply ignoring the cell is fine. Note also that we skip the check for the
-                    // system keyspace, because for those tables we actually remove columns without
-                    // registering them in the dropped columns
-                    if (metadata.ksName.equals(SchemaConstants.SYSTEM_KEYSPACE_NAME) || metadata.getDroppedColumnDefinition(e.columnName) != null)
-                        return computeNext();
-                    else
-                        throw new IOError(e);
-                }
-                catch (IOException e)
-                {
-                    throw new IOError(e);
-                }
-            }
-        };
-    }
-
-    public static class CellGrouper
+    private static class CellGrouper
     {
         public final CFMetaData metadata;
         private final boolean isStatic;
@@ -1285,53 +854,6 @@ public abstract class LegacyLayout
         }
     }
 
-    public static class LegacyUnfilteredPartition
-    {
-        public final DeletionTime partitionDeletion;
-        public final LegacyRangeTombstoneList rangeTombstones;
-        public final List<LegacyCell> cells;
-
-        private LegacyUnfilteredPartition(DeletionTime partitionDeletion, LegacyRangeTombstoneList rangeTombstones, List<LegacyCell> cells)
-        {
-            this.partitionDeletion = partitionDeletion;
-            this.rangeTombstones = rangeTombstones;
-            this.cells = cells;
-        }
-
-        public void digest(CFMetaData metadata, MessageDigest digest)
-        {
-            for (LegacyCell cell : cells)
-            {
-                digest.update(cell.name.encode(metadata).duplicate());
-
-                if (cell.isCounter())
-                    CounterContext.instance().updateDigest(digest, cell.value);
-                else
-                    digest.update(cell.value.duplicate());
-
-                FBUtilities.updateWithLong(digest, cell.timestamp);
-                FBUtilities.updateWithByte(digest, cell.serializationFlags());
-
-                if (cell.isExpiring())
-                    FBUtilities.updateWithInt(digest, cell.ttl);
-
-                if (cell.isCounter())
-                {
-                    // Counters used to have the timestampOfLastDelete field, which we stopped using long ago and which has been
-                    // hard-coded to Long.MIN_VALUE but was still taken into account in 2.2 counter digests (to maintain backward compatibility
-                    // in the first place).
-                    FBUtilities.updateWithLong(digest, Long.MIN_VALUE);
-                }
-            }
-
-            if (partitionDeletion.markedForDeleteAt() != Long.MIN_VALUE)
-                digest.update(ByteBufferUtil.bytes(partitionDeletion.markedForDeleteAt()));
-
-            if (!rangeTombstones.isEmpty())
-                rangeTombstones.updateDigest(digest);
-        }
-    }
-
     public static class LegacyCellName
     {
         public final Clustering clustering;
@@ -1822,7 +1344,7 @@ public abstract class LegacyLayout
      * This class is needed to allow us to convert single-row deletions and complex deletions into range tombstones
      * and properly merge them into the normal set of range tombstones.
      */
-    public static class LegacyRangeTombstoneList
+    private static class LegacyRangeTombstoneList
     {
         private final LegacyBoundComparator comparator;
 

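[Editor's note] For readers tracking what this hunk removes: the deleted readLegacyAtom/readLegacyCellBody pair implemented the pre-3.0 on-wire cell format, where a single flags byte (the DELETION/EXPIRATION/COUNTER/COUNTER_UPDATE/RANGE_TOMBSTONE masks deleted above) selected how the cell body was laid out. Below is a minimal, self-contained sketch of just that dispatch; the class name and LegacyKind enum are illustrative stand-ins, not Cassandra types.

    // Simplified sketch of the removed flags-byte dispatch. The mask values
    // mirror the constants deleted from LegacyLayout above; everything else
    // here is illustrative only.
    public class LegacyCellFlagSketch
    {
        static final int DELETION_MASK        = 0x01;
        static final int EXPIRATION_MASK      = 0x02;
        static final int COUNTER_MASK         = 0x04;
        static final int COUNTER_UPDATE_MASK  = 0x08;
        static final int RANGE_TOMBSTONE_MASK = 0x10;

        enum LegacyKind { REGULAR, DELETED, EXPIRING, COUNTER, COUNTER_UPDATE, RANGE_TOMBSTONE }

        // Same branch order as the removed readLegacyAtom/readLegacyCellBody:
        // range tombstones are split off first, then counters, expiring cells,
        // counter updates, and finally regular vs. deleted cells.
        static LegacyKind kindFromMask(int mask)
        {
            if ((mask & RANGE_TOMBSTONE_MASK) != 0)
                return LegacyKind.RANGE_TOMBSTONE;
            if ((mask & COUNTER_MASK) != 0)
                return LegacyKind.COUNTER;
            if ((mask & EXPIRATION_MASK) != 0)
                return LegacyKind.EXPIRING;
            if ((mask & COUNTER_UPDATE_MASK) != 0)
                return LegacyKind.COUNTER_UPDATE;
            return (mask & DELETION_MASK) == 0 ? LegacyKind.REGULAR : LegacyKind.DELETED;
        }

        public static void main(String[] args)
        {
            System.out.println(kindFromMask(0x02)); // EXPIRING
            System.out.println(kindFromMask(0x01)); // DELETED
        }
    }
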
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/Memtable.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Memtable.java b/src/java/org/apache/cassandra/db/Memtable.java
index a063bf4..987381c 100644
--- a/src/java/org/apache/cassandra/db/Memtable.java
+++ b/src/java/org/apache/cassandra/db/Memtable.java
@@ -449,9 +449,9 @@ public class Memtable implements Comparable<Memtable>
             this.isBatchLogTable = cfs.name.equals(SystemKeyspace.BATCHES) && cfs.keyspace.getName().equals(SchemaConstants.SYSTEM_KEYSPACE_NAME);
 
             if (flushLocation == null)
-                writer = createFlushWriter(txn, cfs.getSSTablePath(getDirectories().getWriteableLocationAsFile(estimatedSize)), columnsCollector.get(), statsCollector.get());
+                writer = createFlushWriter(txn, cfs.newSSTableDescriptor(getDirectories().getWriteableLocationAsFile(estimatedSize)), columnsCollector.get(), statsCollector.get());
             else
-                writer = createFlushWriter(txn, cfs.getSSTablePath(getDirectories().getLocationForDisk(flushLocation)), columnsCollector.get(), statsCollector.get());
+                writer = createFlushWriter(txn, cfs.newSSTableDescriptor(getDirectories().getLocationForDisk(flushLocation)), columnsCollector.get(), statsCollector.get());
 
         }
 
@@ -503,14 +503,14 @@ public class Memtable implements Comparable<Memtable>
         }
 
         public SSTableMultiWriter createFlushWriter(LifecycleTransaction txn,
-                                                  String filename,
-                                                  PartitionColumns columns,
-                                                  EncodingStats stats)
+                                                    Descriptor descriptor,
+                                                    PartitionColumns columns,
+                                                    EncodingStats stats)
         {
             MetadataCollector sstableMetadataCollector = new MetadataCollector(cfs.metadata.comparator)
                     .commitLogIntervals(new IntervalSet<>(commitLogLowerBound.get(), commitLogUpperBound.get()));
 
-            return cfs.createSSTableMultiWriter(Descriptor.fromFilename(filename),
+            return cfs.createSSTableMultiWriter(descriptor,
                                                 toFlush.size(),
                                                 ActiveRepairService.UNREPAIRED_SSTABLE,
                                                 sstableMetadataCollector,

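[Editor's note] The Memtable hunk above reflects a small API shift that recurs throughout this commit: flush writers are handed a Descriptor directly instead of a filename string that immediately had to be re-parsed. Condensed from the diff (comment form only, not a compilable unit):

    // Before: build a path string, then immediately re-parse it.
    //     writer = createFlushWriter(txn, cfs.getSSTablePath(dir), columns, stats);
    //     ... cfs.createSSTableMultiWriter(Descriptor.fromFilename(filename), ...);
    //
    // After: pass the Descriptor straight through, no string round-trip.
    //     writer = createFlushWriter(txn, cfs.newSSTableDescriptor(dir), columns, stats);
    //     ... cfs.createSSTableMultiWriter(descriptor, ...);
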
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/Mutation.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Mutation.java b/src/java/org/apache/cassandra/db/Mutation.java
index b08d6e5..5a571e8 100644
--- a/src/java/org/apache/cassandra/db/Mutation.java
+++ b/src/java/org/apache/cassandra/db/Mutation.java
@@ -368,21 +368,9 @@ public class Mutation implements IMutation
     {
         public void serialize(Mutation mutation, DataOutputPlus out, int version) throws IOException
         {
-            if (version < MessagingService.VERSION_20)
-                out.writeUTF(mutation.getKeyspaceName());
-
             /* serialize the modifications in the mutation */
             int size = mutation.modifications.size();
-
-            if (version < MessagingService.VERSION_30)
-            {
-                ByteBufferUtil.writeWithShortLength(mutation.key().getKey(), out);
-                out.writeInt(size);
-            }
-            else
-            {
-                out.writeUnsignedVInt(size);
-            }
+            out.writeUnsignedVInt(size);
 
             assert size > 0;
             for (Map.Entry<UUID, PartitionUpdate> entry : mutation.modifications.entrySet())
@@ -391,24 +379,10 @@ public class Mutation implements IMutation
 
         public Mutation deserialize(DataInputPlus in, int version, SerializationHelper.Flag flag) throws IOException
         {
-            if (version < MessagingService.VERSION_20)
-                in.readUTF(); // read pre-2.0 keyspace name
-
-            ByteBuffer key = null;
-            int size;
-            if (version < MessagingService.VERSION_30)
-            {
-                key = ByteBufferUtil.readWithShortLength(in);
-                size = in.readInt();
-            }
-            else
-            {
-                size = (int)in.readUnsignedVInt();
-            }
-
+            int size = (int)in.readUnsignedVInt();
             assert size > 0;
 
-            PartitionUpdate update = PartitionUpdate.serializer.deserialize(in, version, flag, key);
+            PartitionUpdate update = PartitionUpdate.serializer.deserialize(in, version, flag);
             if (size == 1)
                 return new Mutation(update);
 
@@ -418,7 +392,7 @@ public class Mutation implements IMutation
             modifications.put(update.metadata().cfId, update);
             for (int i = 1; i < size; ++i)
             {
-                update = PartitionUpdate.serializer.deserialize(in, version, flag, dk);
+                update = PartitionUpdate.serializer.deserialize(in, version, flag);
                 modifications.put(update.metadata().cfId, update);
             }
 
@@ -432,22 +406,7 @@ public class Mutation implements IMutation
 
         public long serializedSize(Mutation mutation, int version)
         {
-            int size = 0;
-
-            if (version < MessagingService.VERSION_20)
-                size += TypeSizes.sizeof(mutation.getKeyspaceName());
-
-            if (version < MessagingService.VERSION_30)
-            {
-                int keySize = mutation.key().getKey().remaining();
-                size += TypeSizes.sizeof((short) keySize) + keySize;
-                size += TypeSizes.sizeof(mutation.modifications.size());
-            }
-            else
-            {
-                size += TypeSizes.sizeofUnsignedVInt(mutation.modifications.size());
-            }
-
+            int size = TypeSizes.sizeofUnsignedVInt(mutation.modifications.size());
             for (Map.Entry<UUID, PartitionUpdate> entry : mutation.modifications.entrySet())
                 size += PartitionUpdate.serializer.serializedSize(entry.getValue(), version);
 

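[Editor's note] With every pre-3.0 branch gone, the mutation serializer collapses to one shape: an unsigned vint count of modifications, followed by the partition updates themselves. A condensed sketch of the surviving path, assembled from the added lines above (the serialize loop body is inferred from the matching deserialize, and error handling is elided):

    // Condensed sketch of the post-removal serializer; not a drop-in copy.
    public void serialize(Mutation mutation, DataOutputPlus out, int version) throws IOException
    {
        int size = mutation.modifications.size();
        out.writeUnsignedVInt(size); // vint count; no more fixed-int/short-key prefix
        assert size > 0;
        for (Map.Entry<UUID, PartitionUpdate> entry : mutation.modifications.entrySet())
            PartitionUpdate.serializer.serialize(entry.getValue(), out, version); // inferred counterpart of deserialize
    }
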
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/MutationVerbHandler.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/MutationVerbHandler.java b/src/java/org/apache/cassandra/db/MutationVerbHandler.java
index 5888438..59247a2 100644
--- a/src/java/org/apache/cassandra/db/MutationVerbHandler.java
+++ b/src/java/org/apache/cassandra/db/MutationVerbHandler.java
@@ -21,7 +21,6 @@ import java.io.DataInputStream;
 import java.io.IOException;
 import java.net.InetAddress;
 
-import org.apache.cassandra.batchlog.LegacyBatchlogMigrator;
 import org.apache.cassandra.exceptions.WriteTimeoutException;
 import org.apache.cassandra.io.util.FastByteArrayInputStream;
 import org.apache.cassandra.net.*;
@@ -59,16 +58,10 @@ public class MutationVerbHandler implements IVerbHandler<Mutation>
 
         try
         {
-            if (message.version < MessagingService.VERSION_30 && LegacyBatchlogMigrator.isLegacyBatchlogMutation(message.payload))
-            {
-                LegacyBatchlogMigrator.handleLegacyMutation(message.payload);
-                reply(id, replyTo);
-            }
-            else
-                message.payload.applyFuture().thenAccept(o -> reply(id, replyTo)).exceptionally(wto -> {
-                    failed();
-                    return null;
-                });
+            message.payload.applyFuture().thenAccept(o -> reply(id, replyTo)).exceptionally(wto -> {
+                failed();
+                return null;
+            });
         }
         catch (WriteTimeoutException wto)
         {
@@ -76,10 +69,6 @@ public class MutationVerbHandler implements IVerbHandler<Mutation>
         }
     }
 
-    /**
-     * Older versions (< 1.0) will not send this message at all, hence we don't
-     * need to check the version of the data.
-     */
     private static void forwardToLocalNodes(Mutation mutation, MessagingService.Verb verb, byte[] forwardBytes, InetAddress from) throws IOException
     {
         try (DataInputStream in = new DataInputStream(new FastByteArrayInputStream(forwardBytes)))

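[Editor's note] MutationVerbHandler now takes the asynchronous path unconditionally: applyFuture() yields a future whose success triggers the reply and whose failure routes to the error callback. A generic, self-contained illustration of the same thenAccept/exceptionally pattern (plain JDK code, not Cassandra's):

    import java.util.concurrent.CompletableFuture;

    public class AsyncReplySketch
    {
        public static void main(String[] args)
        {
            // Stand-in for message.payload.applyFuture(): apply the work asynchronously.
            CompletableFuture<Void> applyFuture = CompletableFuture.runAsync(() -> {
                // applying the mutation; an exception thrown here lands in exceptionally(...)
            });

            applyFuture.thenAccept(o -> System.out.println("reply sent"))                 // reply(id, replyTo)
                       .exceptionally(t -> { System.out.println("failed"); return null; }) // failed()
                       .join(); // block only so this demo prints before exiting
        }
    }
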
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/PartitionRangeReadCommand.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/PartitionRangeReadCommand.java b/src/java/org/apache/cassandra/db/PartitionRangeReadCommand.java
index 50b568e..3cabf75 100644
--- a/src/java/org/apache/cassandra/db/PartitionRangeReadCommand.java
+++ b/src/java/org/apache/cassandra/db/PartitionRangeReadCommand.java
@@ -273,11 +273,9 @@ public class PartitionRangeReadCommand extends ReadCommand
         return Transformation.apply(iter, new CacheFilter());
     }
 
-    public MessageOut<ReadCommand> createMessage(int version)
+    public MessageOut<ReadCommand> createMessage()
     {
-        return dataRange().isPaging()
-             ? new MessageOut<>(MessagingService.Verb.PAGED_RANGE, this, pagedRangeSerializer)
-             : new MessageOut<>(MessagingService.Verb.RANGE_SLICE, this, rangeSliceSerializer);
+        return new MessageOut<>(MessagingService.Verb.RANGE_SLICE, this, serializer);
     }
 
     protected void appendCQLWhereClause(StringBuilder sb)

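[Editor's note] And with PAGED_RANGE retired alongside the old protocol versions, createMessage() loses its version parameter: every range read is now a RANGE_SLICE message with the single remaining serializer, as the hunk above shows.

    // All range reads now take one shape (from the added line above):
    //     new MessageOut<>(MessagingService.Verb.RANGE_SLICE, this, serializer);
    // The dataRange().isPaging() branch and the pagedRangeSerializer /
    // rangeSliceSerializer pair are gone along with pre-3.0 peers.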

[04/11] cassandra git commit: Remove pre-3.0 compatibility code for 4.0

Posted by sl...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-TOC.txt b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-TOC.txt
deleted file mode 100644
index 7f7fe79..0000000
--- a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_compact/legacy_tables-legacy_ka_clust_compact-ka-1-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-Filter.db
-TOC.txt
-Statistics.db
-Summary.db
-Index.db
-Data.db
-Digest.sha1
-CompressionInfo.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-CompressionInfo.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-CompressionInfo.db
deleted file mode 100644
index 3c7291c..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Data.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Data.db
deleted file mode 100644
index 3566e5a..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Digest.sha1
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Digest.sha1 b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Digest.sha1
deleted file mode 100644
index a679541..0000000
--- a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Digest.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2539906592
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Filter.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Index.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Index.db
deleted file mode 100644
index 51ddf91..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Statistics.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Statistics.db
deleted file mode 100644
index 36e9dc2..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Summary.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Summary.db
deleted file mode 100644
index 35b5e22..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-TOC.txt b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-TOC.txt
deleted file mode 100644
index 7be41d8..0000000
--- a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter/legacy_tables-legacy_ka_clust_counter-ka-1-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-Index.db
-Digest.sha1
-CompressionInfo.db
-Data.db
-Statistics.db
-Summary.db
-TOC.txt
-Filter.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-CompressionInfo.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-CompressionInfo.db
deleted file mode 100644
index e3b71a4..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Data.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Data.db
deleted file mode 100644
index 90d42a5..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Digest.sha1
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Digest.sha1 b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Digest.sha1
deleted file mode 100644
index 52e6552..0000000
--- a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Digest.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2793875907
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Filter.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Index.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Index.db
deleted file mode 100644
index 10df1e8..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Statistics.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Statistics.db
deleted file mode 100644
index 8360ed5..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Summary.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Summary.db
deleted file mode 100644
index 774cbd1..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-TOC.txt b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-TOC.txt
deleted file mode 100644
index 7f7fe79..0000000
--- a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_clust_counter_compact/legacy_tables-legacy_ka_clust_counter_compact-ka-1-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-Filter.db
-TOC.txt
-Statistics.db
-Summary.db
-Index.db
-Data.db
-Digest.sha1
-CompressionInfo.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-CompressionInfo.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-CompressionInfo.db
deleted file mode 100644
index c80e64c..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Data.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Data.db
deleted file mode 100644
index b29a26a..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Digest.sha1
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Digest.sha1 b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Digest.sha1
deleted file mode 100644
index c889c8d..0000000
--- a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Digest.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2802392853
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Filter.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Index.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Index.db
deleted file mode 100644
index f0717e0..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Statistics.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Statistics.db
deleted file mode 100644
index 2af5467..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Summary.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Summary.db
deleted file mode 100644
index 35b5e22..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-TOC.txt b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-TOC.txt
deleted file mode 100644
index 7be41d8..0000000
--- a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple/legacy_tables-legacy_ka_simple-ka-1-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-Index.db
-Digest.sha1
-CompressionInfo.db
-Data.db
-Statistics.db
-Summary.db
-TOC.txt
-Filter.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-CompressionInfo.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-CompressionInfo.db
deleted file mode 100644
index d530b73..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Data.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Data.db
deleted file mode 100644
index 6a38c52..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Digest.sha1
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Digest.sha1 b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Digest.sha1
deleted file mode 100644
index be8e5fb..0000000
--- a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Digest.sha1
+++ /dev/null
@@ -1 +0,0 @@
-606280675
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Filter.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Index.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Index.db
deleted file mode 100644
index d2ec218..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Statistics.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Statistics.db
deleted file mode 100644
index e3fd855..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Summary.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Summary.db
deleted file mode 100644
index af8ad8b..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-TOC.txt b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-TOC.txt
deleted file mode 100644
index 7f7fe79..0000000
--- a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_compact/legacy_tables-legacy_ka_simple_compact-ka-1-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-Filter.db
-TOC.txt
-Statistics.db
-Summary.db
-Index.db
-Data.db
-Digest.sha1
-CompressionInfo.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-CompressionInfo.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-CompressionInfo.db
deleted file mode 100644
index 9c3416e..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Data.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Data.db
deleted file mode 100644
index 1aee64c..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Digest.sha1
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Digest.sha1 b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Digest.sha1
deleted file mode 100644
index 3da96e6..0000000
--- a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Digest.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3671794375
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Filter.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Index.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Index.db
deleted file mode 100644
index 932936c..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Statistics.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Statistics.db
deleted file mode 100644
index fa74e4b..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Summary.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Summary.db
deleted file mode 100644
index 35b5e22..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-TOC.txt b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-TOC.txt
deleted file mode 100644
index 7be41d8..0000000
--- a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter/legacy_tables-legacy_ka_simple_counter-ka-1-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-Index.db
-Digest.sha1
-CompressionInfo.db
-Data.db
-Statistics.db
-Summary.db
-TOC.txt
-Filter.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-CompressionInfo.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-CompressionInfo.db
deleted file mode 100644
index 01c5478..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Data.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Data.db
deleted file mode 100644
index 5f4a7db..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Digest.sha1
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Digest.sha1 b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Digest.sha1
deleted file mode 100644
index a71f766..0000000
--- a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Digest.sha1
+++ /dev/null
@@ -1 +0,0 @@
-616768162
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Filter.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Index.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Index.db
deleted file mode 100644
index 48c153c..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Statistics.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Statistics.db
deleted file mode 100644
index 4a6e940..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Summary.db b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Summary.db
deleted file mode 100644
index af8ad8b..0000000
Binary files a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-TOC.txt b/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-TOC.txt
deleted file mode 100644
index 7f7fe79..0000000
--- a/test/data/legacy-sstables/ka/legacy_tables/legacy_ka_simple_counter_compact/legacy_tables-legacy_ka_simple_counter_compact-ka-1-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-Filter.db
-TOC.txt
-Statistics.db
-Summary.db
-Index.db
-Data.db
-Digest.sha1
-CompressionInfo.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-CompressionInfo.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-CompressionInfo.db
deleted file mode 100644
index 13701c4..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Data.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Data.db
deleted file mode 100644
index f04344a..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Digest.adler32
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Digest.adler32 b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Digest.adler32
deleted file mode 100644
index d6157b2..0000000
--- a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Digest.adler32
+++ /dev/null
@@ -1 +0,0 @@
-1633775217
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Filter.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Index.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Index.db
deleted file mode 100644
index 44b89c4..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Statistics.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Statistics.db
deleted file mode 100644
index a54d94d..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Summary.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Summary.db
deleted file mode 100644
index 35b5e22..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-TOC.txt b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-TOC.txt
deleted file mode 100644
index dec3a3f..0000000
--- a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust/la-1-big-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-CompressionInfo.db
-Digest.adler32
-Filter.db
-Summary.db
-Data.db
-Statistics.db
-TOC.txt
-Index.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-CompressionInfo.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-CompressionInfo.db
deleted file mode 100644
index 2a72f70..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Data.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Data.db
deleted file mode 100644
index 6bc08d2..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Digest.adler32
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Digest.adler32 b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Digest.adler32
deleted file mode 100644
index 943dd1e..0000000
--- a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Digest.adler32
+++ /dev/null
@@ -1 +0,0 @@
-1372047449
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Filter.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Index.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Index.db
deleted file mode 100644
index 9e18f8e..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Statistics.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Statistics.db
deleted file mode 100644
index b2fd408..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Summary.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Summary.db
deleted file mode 100644
index 6cd998f..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-TOC.txt b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-TOC.txt
deleted file mode 100644
index 0aef810..0000000
--- a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_compact/la-1-big-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-TOC.txt
-Statistics.db
-Digest.adler32
-CompressionInfo.db
-Summary.db
-Data.db
-Filter.db
-Index.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-CompressionInfo.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-CompressionInfo.db
deleted file mode 100644
index 0bdb82a..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Data.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Data.db
deleted file mode 100644
index 76d4cbc..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Digest.adler32
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Digest.adler32 b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Digest.adler32
deleted file mode 100644
index e704111..0000000
--- a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Digest.adler32
+++ /dev/null
@@ -1 +0,0 @@
-287946299
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Filter.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Index.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Index.db
deleted file mode 100644
index 51ddf91..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Statistics.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Statistics.db
deleted file mode 100644
index b6ad155..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Summary.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Summary.db
deleted file mode 100644
index 35b5e22..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-TOC.txt b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-TOC.txt
deleted file mode 100644
index dec3a3f..0000000
--- a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter/la-1-big-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-CompressionInfo.db
-Digest.adler32
-Filter.db
-Summary.db
-Data.db
-Statistics.db
-TOC.txt
-Index.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-CompressionInfo.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-CompressionInfo.db
deleted file mode 100644
index d4dec70..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Data.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Data.db
deleted file mode 100644
index 63ee721..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Digest.adler32
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Digest.adler32 b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Digest.adler32
deleted file mode 100644
index 577407e..0000000
--- a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Digest.adler32
+++ /dev/null
@@ -1 +0,0 @@
-2583914481
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Filter.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Index.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Index.db
deleted file mode 100644
index 10df1e8..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Statistics.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Statistics.db
deleted file mode 100644
index 2bfc59d..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Summary.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Summary.db
deleted file mode 100644
index 6cd998f..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-TOC.txt b/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-TOC.txt
deleted file mode 100644
index 0aef810..0000000
--- a/test/data/legacy-sstables/la/legacy_tables/legacy_la_clust_counter_compact/la-1-big-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-TOC.txt
-Statistics.db
-Digest.adler32
-CompressionInfo.db
-Summary.db
-Data.db
-Filter.db
-Index.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-CompressionInfo.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-CompressionInfo.db
deleted file mode 100644
index c80e64c..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Data.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Data.db
deleted file mode 100644
index ae136f5..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Digest.adler32
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Digest.adler32 b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Digest.adler32
deleted file mode 100644
index dacf8ac..0000000
--- a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Digest.adler32
+++ /dev/null
@@ -1 +0,0 @@
-4239203875
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Filter.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Index.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Index.db
deleted file mode 100644
index f0717e0..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Statistics.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Statistics.db
deleted file mode 100644
index 49b9275..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Summary.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Summary.db
deleted file mode 100644
index 35b5e22..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-TOC.txt b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-TOC.txt
deleted file mode 100644
index dec3a3f..0000000
--- a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple/la-1-big-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-CompressionInfo.db
-Digest.adler32
-Filter.db
-Summary.db
-Data.db
-Statistics.db
-TOC.txt
-Index.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-CompressionInfo.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-CompressionInfo.db
deleted file mode 100644
index d530b73..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Data.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Data.db
deleted file mode 100644
index 2e912a1..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Digest.adler32
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Digest.adler32 b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Digest.adler32
deleted file mode 100644
index c07a57f..0000000
--- a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Digest.adler32
+++ /dev/null
@@ -1 +0,0 @@
-278403976
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Filter.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Index.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Index.db
deleted file mode 100644
index d2ec218..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Statistics.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Statistics.db
deleted file mode 100644
index a81e03e..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Summary.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Summary.db
deleted file mode 100644
index 6cd998f..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-TOC.txt b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-TOC.txt
deleted file mode 100644
index 0aef810..0000000
--- a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_compact/la-1-big-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-TOC.txt
-Statistics.db
-Digest.adler32
-CompressionInfo.db
-Summary.db
-Data.db
-Filter.db
-Index.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-CompressionInfo.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-CompressionInfo.db
deleted file mode 100644
index 9c3416e..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Data.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Data.db
deleted file mode 100644
index 010bd1a..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Digest.adler32
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Digest.adler32 b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Digest.adler32
deleted file mode 100644
index 562547a..0000000
--- a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Digest.adler32
+++ /dev/null
@@ -1 +0,0 @@
-590029692
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Filter.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Index.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Index.db
deleted file mode 100644
index 932936c..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Statistics.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Statistics.db
deleted file mode 100644
index 525a4b1..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Summary.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Summary.db
deleted file mode 100644
index 35b5e22..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-TOC.txt b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-TOC.txt
deleted file mode 100644
index dec3a3f..0000000
--- a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter/la-1-big-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-CompressionInfo.db
-Digest.adler32
-Filter.db
-Summary.db
-Data.db
-Statistics.db
-TOC.txt
-Index.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-CompressionInfo.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-CompressionInfo.db
deleted file mode 100644
index 01c5478..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Data.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Data.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Data.db
deleted file mode 100644
index 323ff37..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Digest.adler32
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Digest.adler32 b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Digest.adler32
deleted file mode 100644
index 92237e7..0000000
--- a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Digest.adler32
+++ /dev/null
@@ -1 +0,0 @@
-2048991053
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Filter.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Filter.db
deleted file mode 100644
index c3cb27c..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Index.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Index.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Index.db
deleted file mode 100644
index 48c153c..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Statistics.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Statistics.db
deleted file mode 100644
index 37324a7..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Summary.db b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Summary.db
deleted file mode 100644
index 6cd998f..0000000
Binary files a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-TOC.txt b/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-TOC.txt
deleted file mode 100644
index 0aef810..0000000
--- a/test/data/legacy-sstables/la/legacy_tables/legacy_la_simple_counter_compact/la-1-big-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-TOC.txt
-Statistics.db
-Digest.adler32
-CompressionInfo.db
-Summary.db
-Data.db
-Filter.db
-Index.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-CompressionInfo.db b/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-CompressionInfo.db
deleted file mode 100644
index d9446df..0000000
Binary files a/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Data.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Data.db b/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Data.db
deleted file mode 100644
index f7b696d..0000000
Binary files a/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Digest.sha1
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Digest.sha1 b/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Digest.sha1
deleted file mode 100644
index 55756dd..0000000
--- a/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Digest.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3043896114
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Filter.db b/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Filter.db
deleted file mode 100644
index 3015f10..0000000
Binary files a/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Index.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Index.db b/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Index.db
deleted file mode 100644
index c8b59fb..0000000
Binary files a/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Statistics.db b/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Statistics.db
deleted file mode 100644
index 8535f6a..0000000
Binary files a/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Summary.db b/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Summary.db
deleted file mode 100644
index d9ce8c2..0000000
Binary files a/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-TOC.txt b/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-TOC.txt
deleted file mode 100644
index 7dc8930..0000000
--- a/test/data/migration-sstables/2.1/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/system-compactions_in_progress-ka-1-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-Data.db
-TOC.txt
-Filter.db
-Statistics.db
-Summary.db
-Index.db
-Digest.sha1
-CompressionInfo.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-CompressionInfo.db b/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-CompressionInfo.db
deleted file mode 100644
index b867db8..0000000
Binary files a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Data.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Data.db b/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Data.db
deleted file mode 100644
index f14d86d..0000000
Binary files a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Digest.sha1
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Digest.sha1 b/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Digest.sha1
deleted file mode 100644
index 2f4daa9..0000000
--- a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Digest.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4283441474
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Filter.db b/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Filter.db
deleted file mode 100644
index a5bdd8e..0000000
Binary files a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Index.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Index.db b/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Index.db
deleted file mode 100644
index 5d71315..0000000
Binary files a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Statistics.db b/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Statistics.db
deleted file mode 100644
index aeb2bb8..0000000
Binary files a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Summary.db b/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Summary.db
deleted file mode 100644
index 602ec06..0000000
Binary files a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-TOC.txt b/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-TOC.txt
deleted file mode 100644
index 7dc8930..0000000
--- a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-ka-3-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-Data.db
-TOC.txt
-Filter.db
-Statistics.db
-Summary.db
-Index.db
-Digest.sha1
-CompressionInfo.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-tmp-ka-4-Data.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-tmp-ka-4-Data.db b/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-tmp-ka-4-Data.db
deleted file mode 100644
index f14d86d..0000000
Binary files a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-tmp-ka-4-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-tmp-ka-4-Index.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-tmp-ka-4-Index.db b/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-tmp-ka-4-Index.db
deleted file mode 100644
index 5d71315..0000000
Binary files a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-tmp-ka-4-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-tmplink-ka-4-Data.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-tmplink-ka-4-Data.db b/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-tmplink-ka-4-Data.db
deleted file mode 100644
index f14d86d..0000000
Binary files a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-tmplink-ka-4-Data.db and /dev/null differ


[03/11] cassandra git commit: Remove pre-3.0 compatibility code for 4.0

Posted by sl...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-tmplink-ka-4-Index.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-tmplink-ka-4-Index.db b/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-tmplink-ka-4-Index.db
deleted file mode 100644
index 5d71315..0000000
Binary files a/test/data/migration-sstables/2.1/test/foo-0094ac203e7411e59149ef9f87394ca6/test-foo-tmplink-ka-4-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-CompressionInfo.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-CompressionInfo.db b/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-CompressionInfo.db
deleted file mode 100644
index f7a81f0..0000000
Binary files a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-CompressionInfo.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Data.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Data.db b/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Data.db
deleted file mode 100644
index 2d5e60a..0000000
Binary files a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Digest.adler32
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Digest.adler32 b/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Digest.adler32
deleted file mode 100644
index deffbd1..0000000
--- a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Digest.adler32
+++ /dev/null
@@ -1 +0,0 @@
-2055934203
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Filter.db b/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Filter.db
deleted file mode 100644
index a749417..0000000
Binary files a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Index.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Index.db b/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Index.db
deleted file mode 100644
index d3923ab..0000000
Binary files a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Statistics.db b/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Statistics.db
deleted file mode 100644
index 664bfa5..0000000
Binary files a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Summary.db b/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Summary.db
deleted file mode 100644
index a74f96f..0000000
Binary files a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-TOC.txt b/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-TOC.txt
deleted file mode 100644
index 92dc9fe..0000000
--- a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/la-1-big-TOC.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-Statistics.db
-Summary.db
-TOC.txt
-Filter.db
-Data.db
-CompressionInfo.db
-Digest.adler32
-Index.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmp-la-2-big-Data.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmp-la-2-big-Data.db b/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmp-la-2-big-Data.db
deleted file mode 100644
index 2d5e60a..0000000
Binary files a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmp-la-2-big-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmp-la-2-big-Index.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmp-la-2-big-Index.db b/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmp-la-2-big-Index.db
deleted file mode 100644
index d3923ab..0000000
Binary files a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmp-la-2-big-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmp-lb-3-big-Data.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmp-lb-3-big-Data.db b/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmp-lb-3-big-Data.db
deleted file mode 100644
index 2d5e60a..0000000
Binary files a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmp-lb-3-big-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmp-lb-3-big-Index.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmp-lb-3-big-Index.db b/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmp-lb-3-big-Index.db
deleted file mode 100644
index d3923ab..0000000
Binary files a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmp-lb-3-big-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmplink-la-2-big-Data.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmplink-la-2-big-Data.db b/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmplink-la-2-big-Data.db
deleted file mode 100644
index 2d5e60a..0000000
Binary files a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmplink-la-2-big-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmplink-la-2-big-Index.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmplink-la-2-big-Index.db b/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmplink-la-2-big-Index.db
deleted file mode 100644
index d3923ab..0000000
Binary files a/test/data/migration-sstables/2.2/keyspace1/test-dfcc85801bc811e5aa694b06169f4ffa/tmplink-la-2-big-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.2/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/snapshots/1435108403246-compactions_in_progress/manifest.json
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.2/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/snapshots/1435108403246-compactions_in_progress/manifest.json b/test/data/migration-sstables/2.2/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/snapshots/1435108403246-compactions_in_progress/manifest.json
deleted file mode 100644
index d5fdb4f..0000000
--- a/test/data/migration-sstables/2.2/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/snapshots/1435108403246-compactions_in_progress/manifest.json
+++ /dev/null
@@ -1 +0,0 @@
-{"files":[]}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.2/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/snapshots/1435298241281-upgrade-3.0.0-SNAPSHOT-2.2.0-rc1-SNAPSHOT/manifest.json
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.2/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/snapshots/1435298241281-upgrade-3.0.0-SNAPSHOT-2.2.0-rc1-SNAPSHOT/manifest.json b/test/data/migration-sstables/2.2/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/snapshots/1435298241281-upgrade-3.0.0-SNAPSHOT-2.2.0-rc1-SNAPSHOT/manifest.json
deleted file mode 100644
index d5fdb4f..0000000
--- a/test/data/migration-sstables/2.2/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/snapshots/1435298241281-upgrade-3.0.0-SNAPSHOT-2.2.0-rc1-SNAPSHOT/manifest.json
+++ /dev/null
@@ -1 +0,0 @@
-{"files":[]}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/2.2/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/snapshots/1435298241532-compactions_in_progress/manifest.json
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/2.2/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/snapshots/1435298241532-compactions_in_progress/manifest.json b/test/data/migration-sstables/2.2/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/snapshots/1435298241532-compactions_in_progress/manifest.json
deleted file mode 100644
index d5fdb4f..0000000
--- a/test/data/migration-sstables/2.2/system/compactions_in_progress-55080ab05d9c388690a4acb25fe1f77b/snapshots/1435298241532-compactions_in_progress/manifest.json
+++ /dev/null
@@ -1 +0,0 @@
-{"files":[]}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Data.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Data.db b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Data.db
deleted file mode 100644
index 98d3f41..0000000
Binary files a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Digest.sha1
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Digest.sha1 b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Digest.sha1
deleted file mode 100644
index 470b056..0000000
--- a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Digest.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9ee805b905aa147afe14d4f37f5ed3be3af53c72  Keyspace1-legacyleveled-ic-0-Data.db
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Filter.db b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Filter.db
deleted file mode 100644
index c63729b..0000000
Binary files a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Index.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Index.db b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Index.db
deleted file mode 100644
index 6603018..0000000
Binary files a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Statistics.db b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Statistics.db
deleted file mode 100644
index 5ed9ce0..0000000
Binary files a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Summary.db b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Summary.db
deleted file mode 100644
index c1c8fd8..0000000
Binary files a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-TOC.txt b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-TOC.txt
deleted file mode 100644
index 6baaf14..0000000
--- a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-0-TOC.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Filter.db
-Summary.db
-Data.db
-Digest.sha1
-Index.db
-TOC.txt
-Statistics.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Data.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Data.db b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Data.db
deleted file mode 100644
index 98d3f41..0000000
Binary files a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Digest.sha1
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Digest.sha1 b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Digest.sha1
deleted file mode 100644
index d8db723..0000000
--- a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Digest.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9ee805b905aa147afe14d4f37f5ed3be3af53c72  Keyspace1-legacyleveled-ic-1-Data.db
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Filter.db b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Filter.db
deleted file mode 100644
index c63729b..0000000
Binary files a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Index.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Index.db b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Index.db
deleted file mode 100644
index 6603018..0000000
Binary files a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Statistics.db b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Statistics.db
deleted file mode 100644
index 5ed9ce0..0000000
Binary files a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Summary.db b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Summary.db
deleted file mode 100644
index c1c8fd8..0000000
Binary files a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-TOC.txt b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-TOC.txt
deleted file mode 100644
index 6baaf14..0000000
--- a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-1-TOC.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Filter.db
-Summary.db
-Data.db
-Digest.sha1
-Index.db
-TOC.txt
-Statistics.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Data.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Data.db b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Data.db
deleted file mode 100644
index 98d3f41..0000000
Binary files a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Data.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Digest.sha1
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Digest.sha1 b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Digest.sha1
deleted file mode 100644
index 31da1c4..0000000
--- a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Digest.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9ee805b905aa147afe14d4f37f5ed3be3af53c72  Keyspace1-legacyleveled-ic-2-Data.db
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Filter.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Filter.db b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Filter.db
deleted file mode 100644
index c63729b..0000000
Binary files a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Filter.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Index.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Index.db b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Index.db
deleted file mode 100644
index 6603018..0000000
Binary files a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Index.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Statistics.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Statistics.db b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Statistics.db
deleted file mode 100644
index 5ed9ce0..0000000
Binary files a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Statistics.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Summary.db
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Summary.db b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Summary.db
deleted file mode 100644
index c1c8fd8..0000000
Binary files a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-Summary.db and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-TOC.txt
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-TOC.txt b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-TOC.txt
deleted file mode 100644
index 6baaf14..0000000
--- a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/Keyspace1-legacyleveled-ic-2-TOC.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Filter.db
-Summary.db
-Data.db
-Digest.sha1
-Index.db
-TOC.txt
-Statistics.db

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/data/migration-sstables/ic/Keyspace1/legacyleveled/legacyleveled.json
----------------------------------------------------------------------
diff --git a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/legacyleveled.json b/test/data/migration-sstables/ic/Keyspace1/legacyleveled/legacyleveled.json
deleted file mode 100644
index 1fc9c01..0000000
--- a/test/data/migration-sstables/ic/Keyspace1/legacyleveled/legacyleveled.json
+++ /dev/null
@@ -1,27 +0,0 @@
-{
-  "generations" : [ {
-    "generation" : 0,
-    "members" : [ 0 ]
-  }, {
-    "generation" : 1,
-    "members" : [ 1 ]
-  }, {
-    "generation" : 2,
-    "members" : [ 2 ]
-  }, {
-    "generation" : 3,
-    "members" : [ ]
-  }, {
-    "generation" : 4,
-    "members" : [ ]
-  }, {
-    "generation" : 5,
-    "members" : [ ]
-  }, {
-    "generation" : 6,
-    "members" : [ ]
-  }, {
-    "generation" : 7,
-    "members" : [ ]
-  } ]
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/long/org/apache/cassandra/cql3/ViewLongTest.java
----------------------------------------------------------------------
diff --git a/test/long/org/apache/cassandra/cql3/ViewLongTest.java b/test/long/org/apache/cassandra/cql3/ViewLongTest.java
index a5d17ea..d6bebc6 100644
--- a/test/long/org/apache/cassandra/cql3/ViewLongTest.java
+++ b/test/long/org/apache/cassandra/cql3/ViewLongTest.java
@@ -128,7 +128,7 @@ public class ViewLongTest extends CQLTester
 
         for (int i = 0; i < writers * insertsPerWriter; i++)
         {
-            if (executeNet(protocolVersion, "SELECT COUNT(*) FROM system.batchlog").one().getLong(0) == 0)
+            if (executeNet(protocolVersion, "SELECT COUNT(*) FROM system.batches").one().getLong(0) == 0)
                 break;
             try
             {
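
The hunk above tracks the batchlog's move from the dropped system.batchlog table to system.batches; the polling loop itself is unchanged. A minimal sketch of that drain-wait pattern as a reusable helper (assuming CQLTester's executeNet and protocolVersion as used in the hunk; the helper name and back-off interval are illustrative):

    // Poll until the batchlog table is empty, bounding the wait rather than
    // spinning forever. Sketch only.
    private void awaitBatchlogDrain(int maxAttempts) throws Throwable
    {
        for (int attempt = 0; attempt < maxAttempts; attempt++)
        {
            long pending = executeNet(protocolVersion, "SELECT COUNT(*) FROM system.batches")
                           .one().getLong(0);
            if (pending == 0)
                return;        // batchlog fully replayed
            Thread.sleep(100); // back off before re-checking
        }
        throw new AssertionError("batchlog did not drain in time");
    }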

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/long/org/apache/cassandra/utils/LongBloomFilterTest.java
----------------------------------------------------------------------
diff --git a/test/long/org/apache/cassandra/utils/LongBloomFilterTest.java b/test/long/org/apache/cassandra/utils/LongBloomFilterTest.java
index c50296d..10dd5a6 100644
--- a/test/long/org/apache/cassandra/utils/LongBloomFilterTest.java
+++ b/test/long/org/apache/cassandra/utils/LongBloomFilterTest.java
@@ -38,33 +38,23 @@ public class LongBloomFilterTest
     @Test
     public void testBigInt()
     {
-        testBigInt(false);
-        testBigInt(true);
-    }
-    private static void testBigInt(boolean oldBfHashOrder)
-    {
         int size = 10 * 1000 * 1000;
-        IFilter bf = getFilter(size, FilterTestHelper.spec.bucketsPerElement, false, oldBfHashOrder);
+        IFilter bf = getFilter(size, FilterTestHelper.spec.bucketsPerElement, false);
         double fp = testFalsePositives(bf,
                                        new KeyGenerator.IntGenerator(size),
                                        new KeyGenerator.IntGenerator(size, size * 2));
-        logger.info("Bloom filter false positive for oldBfHashOrder={}: {}", oldBfHashOrder, fp);
+        logger.info("Bloom filter false positive: {}", fp);
     }
 
     @Test
     public void testBigRandom()
     {
-        testBigRandom(false);
-        testBigRandom(true);
-    }
-    private static void testBigRandom(boolean oldBfHashOrder)
-    {
         int size = 10 * 1000 * 1000;
-        IFilter bf = getFilter(size, FilterTestHelper.spec.bucketsPerElement, false, oldBfHashOrder);
+        IFilter bf = getFilter(size, FilterTestHelper.spec.bucketsPerElement, false);
         double fp = testFalsePositives(bf,
                                        new KeyGenerator.RandomStringGenerator(new Random().nextInt(), size),
                                        new KeyGenerator.RandomStringGenerator(new Random().nextInt(), size));
-        logger.info("Bloom filter false positive for oldBfHashOrder={}: {}", oldBfHashOrder, fp);
+        logger.info("Bloom filter false positive: {}", fp);
     }
 
     /**
@@ -73,26 +63,21 @@ public class LongBloomFilterTest
     @Test
     public void testConstrained()
     {
-        testConstrained(false);
-        testConstrained(true);
-    }
-    private static void testConstrained(boolean oldBfHashOrder)
-    {
         int size = 10 * 1000 * 1000;
-        try (IFilter bf = getFilter(size, 0.01, false, oldBfHashOrder))
+        try (IFilter bf = getFilter(size, 0.01, false))
         {
             double fp = testFalsePositives(bf,
                                            new KeyGenerator.IntGenerator(size),
                                            new KeyGenerator.IntGenerator(size, size * 2));
-            logger.info("Bloom filter false positive for oldBfHashOrder={}: {}", oldBfHashOrder, fp);
+            logger.info("Bloom filter false positive: {}", fp);
         }
     }
 
-    private static void testConstrained(double targetFp, int elements, boolean oldBfHashOrder, int staticBitCount, long ... staticBits)
+    private static void testConstrained(double targetFp, int elements, int staticBitCount, long ... staticBits)
     {
         for (long bits : staticBits)
         {
-            try (IFilter bf = getFilter(elements, targetFp, false, oldBfHashOrder))
+            try (IFilter bf = getFilter(elements, targetFp, false))
             {
                 SequentialHashGenerator gen = new SequentialHashGenerator(staticBitCount, bits);
                 long[] hash = new long[2];
@@ -131,23 +116,17 @@ public class LongBloomFilterTest
     @Test
     public void testBffp()
     {
-        bffp(false);
-        bffp(true);
-    }
-
-    private static void bffp(boolean flipInputs)
-    {
-        System.out.println("Bloom filter false posiitive with flipInputs=" + flipInputs);
+        System.out.println("Bloom filter false posiitive");
         long[] staticBits = staticBits(4, 0);
-        testConstrained(0.01d, 10 << 20, flipInputs, 0, staticBits);
-        testConstrained(0.01d, 1 << 20, flipInputs, 6, staticBits);
-        testConstrained(0.01d, 10 << 20, flipInputs, 6, staticBits);
-        testConstrained(0.01d, 1 << 19, flipInputs, 10, staticBits);
-        testConstrained(0.01d, 1 << 20, flipInputs, 10, staticBits);
-        testConstrained(0.01d, 10 << 20, flipInputs, 10, staticBits);
-        testConstrained(0.1d, 10 << 20, flipInputs, 0, staticBits);
-        testConstrained(0.1d, 10 << 20, flipInputs, 8, staticBits);
-        testConstrained(0.1d, 10 << 20, flipInputs, 10, staticBits);
+        testConstrained(0.01d, 10 << 20, 0, staticBits);
+        testConstrained(0.01d, 1 << 20, 6, staticBits);
+        testConstrained(0.01d, 10 << 20, 6, staticBits);
+        testConstrained(0.01d, 1 << 19, 10, staticBits);
+        testConstrained(0.01d, 1 << 20, 10, staticBits);
+        testConstrained(0.01d, 10 << 20, 10, staticBits);
+        testConstrained(0.1d, 10 << 20, 0, staticBits);
+        testConstrained(0.1d, 10 << 20, 8, staticBits);
+        testConstrained(0.1d, 10 << 20, 10, staticBits);
     }
 
     static long[] staticBits(int random, long ... fixed)
@@ -180,13 +159,8 @@ public class LongBloomFilterTest
     @Test
     public void timeit()
     {
-        timeit(false);
-        timeit(true);
-    }
-    private static void timeit(boolean oldBfHashOrder)
-    {
         int size = 300 * FilterTestHelper.ELEMENTS;
-        IFilter bf = getFilter(size, FilterTestHelper.spec.bucketsPerElement, false, oldBfHashOrder);
+        IFilter bf = getFilter(size, FilterTestHelper.spec.bucketsPerElement, false);
         double sumfp = 0;
         for (int i = 0; i < 10; i++)
         {
@@ -196,6 +170,6 @@ public class LongBloomFilterTest
 
             bf.clear();
         }
-        logger.info("Bloom filter mean false positive for oldBfHashOrder={}: {}", oldBfHashOrder, sumfp / 10);
+        logger.info("Bloom filter mean false positive: {}", sumfp / 10);
     }
 }
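
These hunks remove the oldBfHashOrder leg from each test, so every case now measures a single false-positive rate. As a self-contained illustration of what is being measured — probing a key range known to be absent against a range actually inserted, as the IntGenerator(size) / IntGenerator(size, size * 2) pair does above — here is a toy sketch (deliberately not Cassandra's IFilter; the bit sizing, hash mix, and counts are illustrative only):

    import java.util.BitSet;

    // Toy Bloom filter over ints; illustrates the measurement pattern only.
    public class BloomFpSketch
    {
        private final BitSet bits;
        private final int numBits;
        private final int numHashes;

        BloomFpSketch(int numBits, int numHashes)
        {
            this.numBits = numBits;
            this.numHashes = numHashes;
            this.bits = new BitSet(numBits);
        }

        // Murmur3-style finalizer, seeded per hash function.
        private int hash(int key, int seed)
        {
            int h = key ^ (seed * 0x9E3779B9);
            h ^= h >>> 16; h *= 0x85EBCA6B;
            h ^= h >>> 13; h *= 0xC2B2AE35;
            h ^= h >>> 16;
            return Math.floorMod(h, numBits);
        }

        void add(int key)
        {
            for (int i = 0; i < numHashes; i++)
                bits.set(hash(key, i));
        }

        boolean mightContain(int key)
        {
            for (int i = 0; i < numHashes; i++)
                if (!bits.get(hash(key, i)))
                    return false;
            return true;
        }

        public static void main(String[] args)
        {
            int size = 100_000;
            BloomFpSketch bf = new BloomFpSketch(size * 10, 5);
            for (int i = 0; i < size; i++)
                bf.add(i);                  // insert keys [0, size)

            int falsePositives = 0;
            for (int i = size; i < 2 * size; i++)
                if (bf.mightContain(i))     // probe keys known to be absent
                    falsePositives++;

            System.out.printf("false positive rate: %.4f%n",
                              (double) falsePositives / size);
        }
    }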

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/AbstractSerializationsTester.java b/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
index 3a1f348..04cb083 100644
--- a/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
+++ b/test/unit/org/apache/cassandra/AbstractSerializationsTester.java
@@ -39,12 +39,6 @@ public class AbstractSerializationsTester
     protected static final String CUR_VER = System.getProperty("cassandra.version", "3.0");
     protected static final Map<String, Integer> VERSION_MAP = new HashMap<String, Integer> ()
     {{
-        put("0.7", 1);
-        put("1.0", 3);
-        put("1.2", MessagingService.VERSION_12);
-        put("2.0", MessagingService.VERSION_20);
-        put("2.1", MessagingService.VERSION_21);
-        put("2.2", MessagingService.VERSION_22);
         put("3.0", MessagingService.VERSION_30);
     }};
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/batchlog/BatchTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/batchlog/BatchTest.java b/test/unit/org/apache/cassandra/batchlog/BatchTest.java
index 4e64ec6..d2db9b9 100644
--- a/test/unit/org/apache/cassandra/batchlog/BatchTest.java
+++ b/test/unit/org/apache/cassandra/batchlog/BatchTest.java
@@ -106,63 +106,4 @@ public class BatchTest
             }
         }
     }
-
-    /**
-     * This is just to test decodeMutations() when deserializing,
-     * since Batch will never be serialized at version 2.2.
-     * @throws IOException
-     */
-    @Test
-    public void testSerializationNonCurrentVersion() throws IOException
-    {
-        CFMetaData cfm = Keyspace.open(KEYSPACE).getColumnFamilyStore(CF_STANDARD).metadata;
-
-        long now = FBUtilities.timestampMicros();
-        int version = MessagingService.VERSION_22;
-        UUID uuid = UUIDGen.getTimeUUID();
-
-        List<Mutation> mutations = new ArrayList<>(10);
-        for (int i = 0; i < 10; i++)
-        {
-            mutations.add(new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), bytes(i))
-                          .clustering("name" + i)
-                          .add("val", "val" + i)
-                          .build());
-        }
-
-        Batch batch1 = Batch.createLocal(uuid, now, mutations);
-        assertEquals(uuid, batch1.id);
-        assertEquals(now, batch1.creationTime);
-        assertEquals(mutations, batch1.decodedMutations);
-
-        DataOutputBuffer out = new DataOutputBuffer();
-        Batch.serializer.serialize(batch1, out, version);
-
-        assertEquals(out.getLength(), Batch.serializer.serializedSize(batch1, version));
-
-        DataInputPlus dis = new DataInputBuffer(out.getData());
-        Batch batch2 = Batch.serializer.deserialize(dis, version);
-
-        assertEquals(batch1.id, batch2.id);
-        assertEquals(batch1.creationTime, batch2.creationTime);
-        assertEquals(batch1.decodedMutations.size(), batch2.decodedMutations.size());
-
-        Iterator<Mutation> it1 = batch1.decodedMutations.iterator();
-        Iterator<Mutation> it2 = batch2.decodedMutations.iterator();
-        while (it1.hasNext())
-        {
-            // We can't simply test the equality of both mutations' string representations, that is, do:
-            //   assertEquals(it1.next().toString(), it2.next().toString());
-            // because when deserializing from the old format, the returned iterator will always have its 'columns()'
-            // method return all the table columns (no matter what the actual content is), and the table contains a
-            // 'val0' column we're not setting in that test.
-            //
-            // And it's actually not easy to fix legacy deserialization as we'd need to know which columns are actually
-            // set upfront, which would require us to iterate over the whole content first, which would be costly. And
-            // as the result of 'columns()' is only meant as a superset of the columns in the iterator, we don't bother.
-            Mutation mut1 = it1.next();
-            Mutation mut2 = it2.next();
-            assertTrue(mut1 + " != " + mut2, Util.sameContent(mut1, mut2));
-        }
-    }
 }
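
The comment in the deleted test is worth preserving in spirit: after deserializing from an older format, mutation toString() output can differ even when content matches, so comparisons must be content-based (Util.sameContent). A minimal sketch of the round-trip check it performed, with names taken from the hunk above and the surrounding setup (batch, version) assumed:

    // Serialize at an explicit messaging version, verify the size accounting,
    // then deserialize and compare content, not toString(). Sketch only.
    DataOutputBuffer out = new DataOutputBuffer();
    Batch.serializer.serialize(batch, out, version);
    assertEquals(out.getLength(), Batch.serializer.serializedSize(batch, version));

    DataInputPlus in = new DataInputBuffer(out.getData());
    Batch roundTripped = Batch.serializer.deserialize(in, version);

    assertEquals(batch.id, roundTripped.id);
    assertEquals(batch.creationTime, roundTripped.creationTime);
    // For the decoded mutations, compare content pairwise,
    // e.g. with Util.sameContent(mut1, mut2) as the deleted test did.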

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/batchlog/BatchlogManagerTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/batchlog/BatchlogManagerTest.java b/test/unit/org/apache/cassandra/batchlog/BatchlogManagerTest.java
index f192bcf..038255b 100644
--- a/test/unit/org/apache/cassandra/batchlog/BatchlogManagerTest.java
+++ b/test/unit/org/apache/cassandra/batchlog/BatchlogManagerTest.java
@@ -100,7 +100,6 @@ public class BatchlogManagerTest
         metadata.updateNormalToken(Util.token("A"), localhost);
         metadata.updateHostId(UUIDGen.getTimeUUID(), localhost);
         Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).truncateBlocking();
-        Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.LEGACY_BATCHLOG).truncateBlocking();
     }
 
     @Test
@@ -129,19 +128,8 @@ public class BatchlogManagerTest
     }
 
     @Test
-    public void testReplay() throws Exception
-    {
-        testReplay(false);
-    }
-
-    @Test
-    public void testLegacyReplay() throws Exception
-    {
-        testReplay(true);
-    }
-
     @SuppressWarnings("deprecation")
-    private static void testReplay(boolean legacy) throws Exception
+    public void testReplay() throws Exception
     {
         long initialAllBatches = BatchlogManager.instance.countAllBatches();
         long initialReplayedBatches = BatchlogManager.instance.getTotalBatchesReplayed();
@@ -165,16 +153,7 @@ public class BatchlogManagerTest
                            ? (System.currentTimeMillis() - BatchlogManager.getBatchlogTimeout())
                            : (System.currentTimeMillis() + BatchlogManager.getBatchlogTimeout());
 
-            if (legacy)
-                LegacyBatchlogMigrator.store(Batch.createLocal(UUIDGen.getTimeUUID(timestamp, i), timestamp * 1000, mutations), MessagingService.current_version);
-            else
-                BatchlogManager.store(Batch.createLocal(UUIDGen.getTimeUUID(timestamp, i), timestamp * 1000, mutations));
-        }
-
-        if (legacy)
-        {
-            Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.LEGACY_BATCHLOG).forceBlockingFlush();
-            LegacyBatchlogMigrator.migrate();
+            BatchlogManager.store(Batch.createLocal(UUIDGen.getTimeUUID(timestamp, i), timestamp * 1000, mutations));
         }
 
         // Flush the batchlog to disk (see CASSANDRA-6822).
@@ -295,104 +274,6 @@ public class BatchlogManagerTest
     }
 
     @Test
-    @SuppressWarnings("deprecation")
-    public void testConversion() throws Exception
-    {
-        long initialAllBatches = BatchlogManager.instance.countAllBatches();
-        long initialReplayedBatches = BatchlogManager.instance.getTotalBatchesReplayed();
-        CFMetaData cfm = Schema.instance.getCFMetaData(KEYSPACE1, CF_STANDARD4);
-
-        // Generate 1400 version 2.0 mutations and put them all into the batchlog.
-        // Half ready to be replayed, half not.
-        for (int i = 0; i < 1400; i++)
-        {
-            Mutation mutation = new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), ByteBufferUtil.bytes(i))
-                .clustering("name" + i)
-                .add("val", "val" + i)
-                .build();
-
-            long timestamp = i < 700
-                           ? (System.currentTimeMillis() - BatchlogManager.getBatchlogTimeout())
-                           : (System.currentTimeMillis() + BatchlogManager.getBatchlogTimeout());
-
-
-            Mutation batchMutation = LegacyBatchlogMigrator.getStoreMutation(Batch.createLocal(UUIDGen.getTimeUUID(timestamp, i),
-                                                                                               TimeUnit.MILLISECONDS.toMicros(timestamp),
-                                                                                               Collections.singleton(mutation)),
-                                                                             MessagingService.VERSION_20);
-            assertTrue(LegacyBatchlogMigrator.isLegacyBatchlogMutation(batchMutation));
-            LegacyBatchlogMigrator.handleLegacyMutation(batchMutation);
-        }
-
-        // Mix in 100 current version mutations, 50 ready for replay.
-        for (int i = 1400; i < 1500; i++)
-        {
-            Mutation mutation = new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), ByteBufferUtil.bytes(i))
-                .clustering("name" + i)
-                .add("val", "val" + i)
-                .build();
-
-            long timestamp = i < 1450
-                           ? (System.currentTimeMillis() - BatchlogManager.getBatchlogTimeout())
-                           : (System.currentTimeMillis() + BatchlogManager.getBatchlogTimeout());
-
-
-            BatchlogManager.store(Batch.createLocal(UUIDGen.getTimeUUID(timestamp, i),
-                                                    FBUtilities.timestampMicros(),
-                                                    Collections.singleton(mutation)));
-        }
-
-        // Flush the batchlog to disk (see CASSANDRA-6822).
-        Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).forceBlockingFlush();
-
-        assertEquals(1500, BatchlogManager.instance.countAllBatches() - initialAllBatches);
-        assertEquals(0, BatchlogManager.instance.getTotalBatchesReplayed() - initialReplayedBatches);
-
-        UntypedResultSet result = executeInternal(String.format("SELECT count(*) FROM \"%s\".\"%s\"", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_BATCHLOG));
-        assertNotNull(result);
-        assertEquals("Count in blog legacy", 0, result.one().getLong("count"));
-        result = executeInternal(String.format("SELECT count(*) FROM \"%s\".\"%s\"", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.BATCHES));
-        assertNotNull(result);
-        assertEquals("Count in blog", 1500, result.one().getLong("count"));
-
-        // Force batchlog replay and wait for it to complete.
-        BatchlogManager.instance.performInitialReplay();
-
-        // Ensure that the first half, and only the first half, got replayed.
-        assertEquals(750, BatchlogManager.instance.countAllBatches() - initialAllBatches);
-        assertEquals(750, BatchlogManager.instance.getTotalBatchesReplayed() - initialReplayedBatches);
-
-        for (int i = 0; i < 1500; i++)
-        {
-            result = executeInternal(String.format("SELECT * FROM \"%s\".\"%s\" WHERE key = intAsBlob(%d)", KEYSPACE1, CF_STANDARD4, i));
-            assertNotNull(result);
-            if (i < 700 || i >= 1400 && i < 1450)
-            {
-                assertEquals(ByteBufferUtil.bytes(i), result.one().getBytes("key"));
-                assertEquals("name" + i, result.one().getString("name"));
-                assertEquals("val" + i, result.one().getString("val"));
-            }
-            else
-            {
-                assertTrue("Present at " + i, result.isEmpty());
-            }
-        }
-
-        // Ensure that no stray mutations got somehow applied.
-        result = executeInternal(String.format("SELECT count(*) FROM \"%s\".\"%s\"", KEYSPACE1, CF_STANDARD4));
-        assertNotNull(result);
-        assertEquals(750, result.one().getLong("count"));
-
-        // Ensure batchlog is left as expected.
-        result = executeInternal(String.format("SELECT count(*) FROM \"%s\".\"%s\"", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.BATCHES));
-        assertNotNull(result);
-        assertEquals("Count in blog after initial replay", 750, result.one().getLong("count"));
-        result = executeInternal(String.format("SELECT count(*) FROM \"%s\".\"%s\"", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_BATCHLOG));
-        assertNotNull(result);
-        assertEquals("Count in blog legacy after initial replay ", 0, result.one().getLong("count"));
-    }
-
-    @Test
     public void testAddBatch() throws IOException
     {
         long initialAllBatches = BatchlogManager.instance.countAllBatches();

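The removed replay test above hinges on one trick: a batch becomes eligible for replay only once the timestamp embedded in its batch id is older than the batchlog timeout, so writing ids dated in the past versus the future controls which half gets replayed. A minimal sketch of that staging step, using only the calls visible in the deleted lines (the import package locations are an assumption, and the helper name is illustrative):

    import java.util.Collections;
    import org.apache.cassandra.batchlog.Batch;
    import org.apache.cassandra.batchlog.BatchlogManager;
    import org.apache.cassandra.db.Mutation;
    import org.apache.cassandra.utils.FBUtilities;
    import org.apache.cassandra.utils.UUIDGen;

    // Illustrative only: ids dated in the past are picked up by the next replay
    // pass; ids dated in the future are skipped until they age past the timeout.
    static void storeBatch(Mutation mutation, int seq, boolean replayableNow)
    {
        long idTimestamp = replayableNow
                         ? System.currentTimeMillis() - BatchlogManager.getBatchlogTimeout()
                         : System.currentTimeMillis() + BatchlogManager.getBatchlogTimeout();
        BatchlogManager.store(Batch.createLocal(UUIDGen.getTimeUUID(idTimestamp, seq),
                                                FBUtilities.timestampMicros(),
                                                Collections.singleton(mutation)));
    }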
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/cache/CacheProviderTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cache/CacheProviderTest.java b/test/unit/org/apache/cassandra/cache/CacheProviderTest.java
index a4173d6..cd52d35 100644
--- a/test/unit/org/apache/cassandra/cache/CacheProviderTest.java
+++ b/test/unit/org/apache/cassandra/cache/CacheProviderTest.java
@@ -103,8 +103,8 @@ public class CacheProviderTest
         {
             MessageDigest d1 = MessageDigest.getInstance("MD5");
             MessageDigest d2 = MessageDigest.getInstance("MD5");
-            UnfilteredRowIterators.digest(null, ((CachedBTreePartition) one).unfilteredIterator(), d1, MessagingService.current_version);
-            UnfilteredRowIterators.digest(null, ((CachedBTreePartition) two).unfilteredIterator(), d2, MessagingService.current_version);
+            UnfilteredRowIterators.digest(((CachedBTreePartition) one).unfilteredIterator(), d1, MessagingService.current_version);
+            UnfilteredRowIterators.digest(((CachedBTreePartition) two).unfilteredIterator(), d2, MessagingService.current_version);
             assertTrue(MessageDigest.isEqual(d1.digest(), d2.digest()));
         }
         catch (NoSuchAlgorithmException e)

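With the pre-3.0 compatibility path removed, UnfilteredRowIterators.digest no longer takes a leading command argument (previously passed as null or a ReadCommand, presumably only needed by the legacy digest versions this commit drops). A sketch of the new call shape, assuming `one` and `two` are any two cached partitions:

    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import org.apache.cassandra.db.partitions.CachedBTreePartition;
    import org.apache.cassandra.db.rows.UnfilteredRowIterators;
    import org.apache.cassandra.net.MessagingService;

    static boolean digestsMatch(CachedBTreePartition one, CachedBTreePartition two) throws NoSuchAlgorithmException
    {
        MessageDigest d1 = MessageDigest.getInstance("MD5");
        MessageDigest d2 = MessageDigest.getInstance("MD5");
        // previously: UnfilteredRowIterators.digest(command, iterator, digest, version)
        UnfilteredRowIterators.digest(one.unfilteredIterator(), d1, MessagingService.current_version);
        UnfilteredRowIterators.digest(two.unfilteredIterator(), d2, MessagingService.current_version);
        return MessageDigest.isEqual(d1.digest(), d2.digest());
    }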
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/cql3/CQLTester.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cql3/CQLTester.java b/test/unit/org/apache/cassandra/cql3/CQLTester.java
index 0ac5628..04ee5df 100644
--- a/test/unit/org/apache/cassandra/cql3/CQLTester.java
+++ b/test/unit/org/apache/cassandra/cql3/CQLTester.java
@@ -197,6 +197,7 @@ public abstract class CQLTester
         ThreadAwareSecurityManager.install();
 
         Keyspace.setInitialized();
+        SystemKeyspace.persistLocalMetadata();
         isServerPrepared = true;
     }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/db/PartitionTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/PartitionTest.java b/test/unit/org/apache/cassandra/db/PartitionTest.java
index 7216ab7..7f44d51 100644
--- a/test/unit/org/apache/cassandra/db/PartitionTest.java
+++ b/test/unit/org/apache/cassandra/db/PartitionTest.java
@@ -119,12 +119,6 @@ public class PartitionTest
         testDigest(MessagingService.current_version);
     }
 
-    @Test
-    public void testLegacyDigest() throws NoSuchAlgorithmException
-    {
-        testDigest(MessagingService.VERSION_22);
-    }
-
     public void testDigest(int version) throws NoSuchAlgorithmException
     {
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_TENCOL);
@@ -145,16 +139,16 @@ public class PartitionTest
 
             MessageDigest digest1 = MessageDigest.getInstance("MD5");
             MessageDigest digest2 = MessageDigest.getInstance("MD5");
-            UnfilteredRowIterators.digest(cmd1, p1.unfilteredIterator(), digest1, version);
-            UnfilteredRowIterators.digest(cmd2, p2.unfilteredIterator(), digest2, version);
+            UnfilteredRowIterators.digest(p1.unfilteredIterator(), digest1, version);
+            UnfilteredRowIterators.digest(p2.unfilteredIterator(), digest2, version);
             assertFalse(Arrays.equals(digest1.digest(), digest2.digest()));
 
             p1 = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, "key2").build());
             p2 = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, "key2").build());
             digest1 = MessageDigest.getInstance("MD5");
             digest2 = MessageDigest.getInstance("MD5");
-            UnfilteredRowIterators.digest(cmd1, p1.unfilteredIterator(), digest1, version);
-            UnfilteredRowIterators.digest(cmd2, p2.unfilteredIterator(), digest2, version);
+            UnfilteredRowIterators.digest(p1.unfilteredIterator(), digest1, version);
+            UnfilteredRowIterators.digest(p2.unfilteredIterator(), digest2, version);
             assertTrue(Arrays.equals(digest1.digest(), digest2.digest()));
 
             p1 = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, "key2").build());
@@ -162,8 +156,8 @@ public class PartitionTest
             p2 = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, "key2").build());
             digest1 = MessageDigest.getInstance("MD5");
             digest2 = MessageDigest.getInstance("MD5");
-            UnfilteredRowIterators.digest(cmd1, p1.unfilteredIterator(), digest1, version);
-            UnfilteredRowIterators.digest(cmd2, p2.unfilteredIterator(), digest2, version);
+            UnfilteredRowIterators.digest(p1.unfilteredIterator(), digest1, version);
+            UnfilteredRowIterators.digest(p2.unfilteredIterator(), digest2, version);
             assertFalse(Arrays.equals(digest1.digest(), digest2.digest()));
         }
         finally

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/db/ReadResponseTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/ReadResponseTest.java b/test/unit/org/apache/cassandra/db/ReadResponseTest.java
deleted file mode 100644
index 52ab8bb..0000000
--- a/test/unit/org/apache/cassandra/db/ReadResponseTest.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.db;
-
-import java.util.*;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import org.apache.cassandra.Util;
-import org.apache.cassandra.config.CFMetaData;
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.cql3.CQLTester;
-import org.apache.cassandra.db.rows.Rows;
-import org.apache.cassandra.db.rows.UnfilteredRowIterators;
-import org.apache.cassandra.db.partitions.ImmutableBTreePartition;
-import org.apache.cassandra.db.partitions.UnfilteredPartitionIterator;
-import org.apache.cassandra.db.marshal.AsciiType;
-import org.apache.cassandra.dht.ByteOrderedPartitioner;
-import org.apache.cassandra.dht.IPartitioner;
-
-import static org.junit.Assert.assertEquals;
-
-public class ReadResponseTest extends CQLTester
-{
-    private IPartitioner partitionerToRestore;
-
-    @Before
-    public void setupPartitioner()
-    {
-        // Using an ordered partitioner to be able to predict keys order in the following tests.
-        partitionerToRestore = DatabaseDescriptor.setPartitionerUnsafe(ByteOrderedPartitioner.instance);
-    }
-
-    @After
-    public void resetPartitioner()
-    {
-        DatabaseDescriptor.setPartitionerUnsafe(partitionerToRestore);
-    }
-
-    @Test
-    public void testLegacyResponseSkipWrongBounds()
-    {
-        createTable("CREATE TABLE %s (k text PRIMARY KEY)");
-
-        ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
-
-        // Test that if a legacy response contains keys at the boundary of the requested key range that shouldn't be present, those
-        // are properly skipped. See CASSANDRA-9857 for context.
-
-        List<ImmutableBTreePartition> responses = Arrays.asList(makePartition(cfs.metadata, "k1"),
-                                                                makePartition(cfs.metadata, "k2"),
-                                                                makePartition(cfs.metadata, "k3"));
-        ReadResponse.LegacyRemoteDataResponse response = new ReadResponse.LegacyRemoteDataResponse(responses);
-
-        assertPartitions(response.makeIterator(Util.cmd(cfs).fromKeyExcl("k1").toKeyExcl("k3").build()), "k2");
-        assertPartitions(response.makeIterator(Util.cmd(cfs).fromKeyExcl("k0").toKeyExcl("k3").build()), "k1", "k2");
-        assertPartitions(response.makeIterator(Util.cmd(cfs).fromKeyExcl("k1").toKeyExcl("k4").build()), "k2", "k3");
-
-        assertPartitions(response.makeIterator(Util.cmd(cfs).fromKeyIncl("k1").toKeyExcl("k3").build()), "k1", "k2");
-        assertPartitions(response.makeIterator(Util.cmd(cfs).fromKeyIncl("k1").toKeyExcl("k4").build()), "k1", "k2", "k3");
-    }
-
-    private void assertPartitions(UnfilteredPartitionIterator actual, String... expectedKeys)
-    {
-        int i = 0;
-        while (i < expectedKeys.length && actual.hasNext())
-        {
-            String actualKey = AsciiType.instance.getString(actual.next().partitionKey().getKey());
-            assertEquals(expectedKeys[i++], actualKey);
-        }
-
-        if (i < expectedKeys.length)
-            throw new AssertionError("Got less results than expected: " + expectedKeys[i] + " is not in the result");
-        if (actual.hasNext())
-            throw new AssertionError("Got more results than expected: first unexpected key is " + AsciiType.instance.getString(actual.next().partitionKey().getKey()));
-    }
-
-    private static ImmutableBTreePartition makePartition(CFMetaData metadata, String key)
-    {
-        return ImmutableBTreePartition.create(UnfilteredRowIterators.noRowsIterator(metadata, Util.dk(key), Rows.EMPTY_STATIC_ROW, new DeletionTime(0, 0), false));
-    }
-}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java b/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java
index 6c8eed5..69e1423 100644
--- a/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java
+++ b/test/unit/org/apache/cassandra/db/RowIndexEntryTest.java
@@ -256,7 +256,7 @@ public class RowIndexEntryTest extends CQLTester
                                               Collection<SSTableFlushObserver> observers,
                                               Version version) throws IOException
         {
-            assert !iterator.isEmpty() && version.storeRows();
+            assert !iterator.isEmpty();
 
             Builder builder = new Builder(iterator, output, header, observers, version.correspondingMessagingVersion());
             return builder.build();
@@ -422,7 +422,7 @@ public class RowIndexEntryTest extends CQLTester
         SequentialWriter writer = new SequentialWriter(tempFile);
         ColumnIndex columnIndex = RowIndexEntryTest.ColumnIndex.writeAndBuildIndex(partition.unfilteredIterator(), writer, header, Collections.emptySet(), BigFormat.latestVersion);
         Pre_C_11206_RowIndexEntry withIndex = Pre_C_11206_RowIndexEntry.create(0xdeadbeef, DeletionTime.LIVE, columnIndex);
-        IndexInfo.Serializer indexSerializer = cfs.metadata.serializers().indexInfoSerializer(BigFormat.latestVersion, header);
+        IndexInfo.Serializer indexSerializer = IndexInfo.serializer(BigFormat.latestVersion, header);
 
         // sanity check
         assertTrue(columnIndex.columnsIndex.size() >= 3);
@@ -567,14 +567,12 @@ public class RowIndexEntryTest extends CQLTester
 
             Serializer(CFMetaData metadata, Version version, SerializationHeader header)
             {
-                this.idxSerializer = metadata.serializers().indexInfoSerializer(version, header);
+                this.idxSerializer = IndexInfo.serializer(version, header);
                 this.version = version;
             }
 
             public void serialize(Pre_C_11206_RowIndexEntry rie, DataOutputPlus out) throws IOException
             {
-                assert version.storeRows() : "We read old index files but we should never write them";
-
                 out.writeUnsignedVInt(rie.position);
                 out.writeUnsignedVInt(rie.promotedSize(idxSerializer));
 
@@ -622,35 +620,6 @@ public class RowIndexEntryTest extends CQLTester
 
             public Pre_C_11206_RowIndexEntry deserialize(DataInputPlus in) throws IOException
             {
-                if (!version.storeRows())
-                {
-                    long position = in.readLong();
-
-                    int size = in.readInt();
-                    if (size > 0)
-                    {
-                        DeletionTime deletionTime = DeletionTime.serializer.deserialize(in);
-
-                        int entries = in.readInt();
-                        List<IndexInfo> columnsIndex = new ArrayList<>(entries);
-
-                        long headerLength = 0L;
-                        for (int i = 0; i < entries; i++)
-                        {
-                            IndexInfo info = idxSerializer.deserialize(in);
-                            columnsIndex.add(info);
-                            if (i == 0)
-                                headerLength = info.offset;
-                        }
-
-                        return new Pre_C_11206_RowIndexEntry.IndexedEntry(position, deletionTime, headerLength, columnsIndex);
-                    }
-                    else
-                    {
-                        return new Pre_C_11206_RowIndexEntry(position);
-                    }
-                }
-
                 long position = in.readUnsignedVInt();
 
                 int size = (int)in.readUnsignedVInt();
@@ -678,7 +647,7 @@ public class RowIndexEntryTest extends CQLTester
             // should be used instead.
             static long readPosition(DataInputPlus in, Version version) throws IOException
             {
-                return version.storeRows() ? in.readUnsignedVInt() : in.readLong();
+                return in.readUnsignedVInt();
             }
 
             public static void skip(DataInputPlus in, Version version) throws IOException
@@ -689,7 +658,7 @@ public class RowIndexEntryTest extends CQLTester
 
             private static void skipPromotedIndex(DataInputPlus in, Version version) throws IOException
             {
-                int size = version.storeRows() ? (int)in.readUnsignedVInt() : in.readInt();
+                int size = (int)in.readUnsignedVInt();
                 if (size <= 0)
                     return;
 
@@ -698,8 +667,6 @@ public class RowIndexEntryTest extends CQLTester
 
             public int serializedSize(Pre_C_11206_RowIndexEntry rie)
             {
-                assert version.storeRows() : "We read old index files but we should never write them";
-
                 int indexedSize = 0;
                 if (rie.isIndexed())
                 {

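The hunks above drop the !version.storeRows() branches entirely: with pre-3.0 index files no longer readable, the entry position and the promoted-index size are always unsigned vints, never the old long/int pair. A small sketch of the prefix read that remains, under that assumption:

    import java.io.IOException;
    import org.apache.cassandra.io.util.DataInputPlus;

    // Reads the fixed prefix of a post-3.0 row index entry. A promoted size of 0
    // means a non-indexed entry; otherwise deletion time and IndexInfo entries follow.
    static long readEntryPrefix(DataInputPlus in) throws IOException
    {
        long position = in.readUnsignedVInt();          // was: version.storeRows() ? readUnsignedVInt() : readLong()
        int promotedSize = (int) in.readUnsignedVInt(); // was: readUnsignedVInt() or readInt(), depending on version
        return position;
    }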
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/db/ScrubTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/ScrubTest.java b/test/unit/org/apache/cassandra/db/ScrubTest.java
index 7e7e145..93ac46e 100644
--- a/test/unit/org/apache/cassandra/db/ScrubTest.java
+++ b/test/unit/org/apache/cassandra/db/ScrubTest.java
@@ -327,8 +327,7 @@ public class ScrubTest
             cfs.clearUnsafe();
 
             List<String> keys = Arrays.asList("t", "a", "b", "z", "c", "y", "d");
-            String filename = cfs.getSSTablePath(tempDataDir);
-            Descriptor desc = Descriptor.fromFilename(filename);
+            Descriptor desc = cfs.newSSTableDescriptor(tempDataDir);
 
             LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.WRITE);
             try (SSTableTxnWriter writer = new SSTableTxnWriter(txn, createTestWriter(desc, (long) keys.size(), cfs.metadata, txn)))
@@ -709,41 +708,4 @@ public class ScrubTest
         rs = QueryProcessor.executeInternal(String.format("SELECT * FROM \"%s\".cf_with_duplicates_3_0", KEYSPACE));
         assertEquals(0, rs.size());
     }
-
-    @Test
-    public void testUpgradeSstablesWithDuplicates() throws Exception
-    {
-        DatabaseDescriptor.setPartitionerUnsafe(Murmur3Partitioner.instance);
-        String cf = "cf_with_duplicates_2_0";
-        QueryProcessor.process(String.format("CREATE TABLE \"%s\".%s (a int, b int, c int, PRIMARY KEY (a, b))", KEYSPACE, cf), ConsistencyLevel.ONE);
-
-        Keyspace keyspace = Keyspace.open(KEYSPACE);
-        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cf);
-
-        Path legacySSTableRoot = Paths.get(System.getProperty(INVALID_LEGACY_SSTABLE_ROOT_PROP),
-                                           "Keyspace1",
-                                           cf);
-
-        for (String filename : new String[]{ "lb-1-big-CompressionInfo.db",
-                                             "lb-1-big-Data.db",
-                                             "lb-1-big-Digest.adler32",
-                                             "lb-1-big-Filter.db",
-                                             "lb-1-big-Index.db",
-                                             "lb-1-big-Statistics.db",
-                                             "lb-1-big-Summary.db",
-                                             "lb-1-big-TOC.txt" })
-        {
-            Files.copy(Paths.get(legacySSTableRoot.toString(), filename), cfs.getDirectories().getDirectoryForNewSSTables().toPath().resolve(filename));
-        }
-
-        cfs.loadNewSSTables();
-
-        cfs.sstablesRewrite(true, 1);
-
-        UntypedResultSet rs = QueryProcessor.executeInternal(String.format("SELECT * FROM \"%s\".%s", KEYSPACE, cf));
-        assertEquals(1, rs.size());
-        QueryProcessor.executeInternal(String.format("DELETE FROM \"%s\".%s WHERE a=1 AND b =2", KEYSPACE, cf));
-        rs = QueryProcessor.executeInternal(String.format("SELECT * FROM \"%s\".%s", KEYSPACE, cf));
-        assertEquals(0, rs.size());
-    }
 }

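The same refactor recurs below in AntiCompactionTest and RealTransactionsTest: instead of rendering an sstable path as a string and parsing it back with Descriptor.fromFilename, tests now ask the ColumnFamilyStore for a fresh Descriptor directly. A condensed sketch combining the call shapes from those hunks (the helper name is illustrative):

    import java.io.File;
    import org.apache.cassandra.db.ColumnFamilyStore;
    import org.apache.cassandra.db.SerializationHeader;
    import org.apache.cassandra.db.rows.EncodingStats;
    import org.apache.cassandra.io.sstable.Descriptor;
    import org.apache.cassandra.io.sstable.SSTableTxnWriter;

    static SSTableTxnWriter newWriterFor(ColumnFamilyStore cfs)
    {
        File dir = cfs.getDirectories().getDirectoryForNewSSTables();
        Descriptor desc = cfs.newSSTableDescriptor(dir); // was: Descriptor.fromFilename(cfs.getSSTablePath(dir))
        return SSTableTxnWriter.create(cfs, desc, 0, 0,
                                       new SerializationHeader(true, cfs.metadata, cfs.metadata.partitionColumns(), EncodingStats.NO_STATS));
    }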
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java b/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
index 3c09c93..95582d5 100644
--- a/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
+++ b/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
@@ -96,61 +96,6 @@ public class SinglePartitionSliceCommandTest
         Keyspace.open(KEYSPACE).getColumnFamilyStore(TABLE).truncateBlocking();
     }
 
-    @Test
-    public void staticColumnsAreFiltered() throws IOException
-    {
-        DecoratedKey key = cfm.decorateKey(ByteBufferUtil.bytes("k"));
-
-        UntypedResultSet rows;
-
-        QueryProcessor.executeInternal("INSERT INTO ks.tbl (k, s, i, v) VALUES ('k', 's', 0, 'v')");
-        QueryProcessor.executeInternal("DELETE v FROM ks.tbl WHERE k='k' AND i=0");
-        QueryProcessor.executeInternal("DELETE FROM ks.tbl WHERE k='k' AND i=0");
-        rows = QueryProcessor.executeInternal("SELECT * FROM ks.tbl WHERE k='k' AND i=0");
-
-        for (UntypedResultSet.Row row: rows)
-        {
-            logger.debug("Current: k={}, s={}, v={}", (row.has("k") ? row.getString("k") : null), (row.has("s") ? row.getString("s") : null), (row.has("v") ? row.getString("v") : null));
-        }
-
-        assert rows.isEmpty();
-
-        ColumnFilter columnFilter = ColumnFilter.selection(PartitionColumns.of(v));
-        ByteBuffer zero = ByteBufferUtil.bytes(0);
-        Slices slices = Slices.with(cfm.comparator, Slice.make(ClusteringBound.inclusiveStartOf(zero), ClusteringBound.inclusiveEndOf(zero)));
-        ClusteringIndexSliceFilter sliceFilter = new ClusteringIndexSliceFilter(slices, false);
-        ReadCommand cmd = new SinglePartitionReadCommand(false, MessagingService.VERSION_30, true, cfm,
-                                                          FBUtilities.nowInSeconds(),
-                                                          columnFilter,
-                                                          RowFilter.NONE,
-                                                          DataLimits.NONE,
-                                                          key,
-                                                          sliceFilter);
-
-        DataOutputBuffer out = new DataOutputBuffer((int) ReadCommand.legacyReadCommandSerializer.serializedSize(cmd, MessagingService.VERSION_21));
-        ReadCommand.legacyReadCommandSerializer.serialize(cmd, out, MessagingService.VERSION_21);
-        DataInputPlus in = new DataInputBuffer(out.buffer(), true);
-        cmd = ReadCommand.legacyReadCommandSerializer.deserialize(in, MessagingService.VERSION_21);
-
-        logger.debug("ReadCommand: {}", cmd);
-        try (ReadExecutionController controller = cmd.executionController();
-             UnfilteredPartitionIterator partitionIterator = cmd.executeLocally(controller))
-        {
-            ReadResponse response = ReadResponse.createDataResponse(partitionIterator, cmd);
-
-            logger.debug("creating response: {}", response);
-            try (UnfilteredPartitionIterator pIter = response.makeIterator(cmd))
-            {
-                assert pIter.hasNext();
-                try (UnfilteredRowIterator partition = pIter.next())
-                {
-                    LegacyLayout.LegacyUnfilteredPartition rowIter = LegacyLayout.fromUnfilteredRowIterator(cmd, partition);
-                    Assert.assertEquals(Collections.emptyList(), rowIter.cells);
-                }
-            }
-        }
-    }
-
     private void checkForS(UnfilteredPartitionIterator pi)
     {
         Assert.assertTrue(pi.toString(), pi.hasNext());

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/db/SystemKeyspaceTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/SystemKeyspaceTest.java b/test/unit/org/apache/cassandra/db/SystemKeyspaceTest.java
index e3e4d8a..d188821 100644
--- a/test/unit/org/apache/cassandra/db/SystemKeyspaceTest.java
+++ b/test/unit/org/apache/cassandra/db/SystemKeyspaceTest.java
@@ -45,8 +45,6 @@ import static org.junit.Assert.assertTrue;
 
 public class SystemKeyspaceTest
 {
-    public static final String MIGRATION_SSTABLES_ROOT = "migration-sstable-root";
-
     @BeforeClass
     public static void prepSnapshotTracker()
     {
@@ -155,81 +153,6 @@ public class SystemKeyspaceTest
         Keyspace.clearSnapshot(null, SchemaConstants.SYSTEM_KEYSPACE_NAME);
     }
 
-    @Test
-    public void testMigrateEmptyDataDirs() throws IOException
-    {
-        File dataDir = Paths.get(DatabaseDescriptor.getAllDataFileLocations()[0]).toFile();
-        if (new File(dataDir, "Emptykeyspace1").exists())
-            FileUtils.deleteDirectory(new File(dataDir, "Emptykeyspace1"));
-        assertTrue(new File(dataDir, "Emptykeyspace1").mkdirs());
-        assertEquals(0, numLegacyFiles());
-        SystemKeyspace.migrateDataDirs();
-        assertEquals(0, numLegacyFiles());
-
-        assertTrue(new File(dataDir, "Emptykeyspace1/table1").mkdirs());
-        assertEquals(0, numLegacyFiles());
-        SystemKeyspace.migrateDataDirs();
-        assertEquals(0, numLegacyFiles());
-
-        assertTrue(new File(dataDir, "Emptykeyspace1/wrong_file").createNewFile());
-        assertEquals(0, numLegacyFiles());
-        SystemKeyspace.migrateDataDirs();
-        assertEquals(0, numLegacyFiles());
-
-    }
-
-    @Test
-    public void testMigrateDataDirs_2_1() throws IOException
-    {
-        testMigrateDataDirs("2.1", 5); // see test data for num legacy files
-    }
-
-    @Test
-    public void testMigrateDataDirs_2_2() throws IOException
-    {
-        testMigrateDataDirs("2.2", 7); // see test data for num legacy files
-    }
-
-    private void testMigrateDataDirs(String version, int numLegacyFiles) throws IOException
-    {
-        Path migrationSSTableRoot = Paths.get(System.getProperty(MIGRATION_SSTABLES_ROOT), version);
-        Path dataDir = Paths.get(DatabaseDescriptor.getAllDataFileLocations()[0]);
-
-        FileUtils.copyDirectory(migrationSSTableRoot.toFile(), dataDir.toFile());
-
-        assertEquals(numLegacyFiles, numLegacyFiles());
-
-        SystemKeyspace.migrateDataDirs();
-
-        assertEquals(0, numLegacyFiles());
-    }
-
-    private static int numLegacyFiles()
-    {
-        int ret = 0;
-        Iterable<String> dirs = Arrays.asList(DatabaseDescriptor.getAllDataFileLocations());
-        for (String dataDir : dirs)
-        {
-            File dir = new File(dataDir);
-            for (File ksdir : dir.listFiles((d, n) -> new File(d, n).isDirectory()))
-            {
-                for (File cfdir : ksdir.listFiles((d, n) -> new File(d, n).isDirectory()))
-                {
-                    if (Descriptor.isLegacyFile(cfdir))
-                    {
-                        ret++;
-                    }
-                    else
-                    {
-                        File[] legacyFiles = cfdir.listFiles((d, n) -> Descriptor.isLegacyFile(new File(d, n)));
-                        ret += legacyFiles.length;
-                    }
-                }
-            }
-        }
-        return ret;
-    }
-
     private String getOlderVersionString()
     {
         String version = FBUtilities.getReleaseVersionString();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/db/VerifyTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/VerifyTest.java b/test/unit/org/apache/cassandra/db/VerifyTest.java
index 4e55a60..77096b9 100644
--- a/test/unit/org/apache/cassandra/db/VerifyTest.java
+++ b/test/unit/org/apache/cassandra/db/VerifyTest.java
@@ -31,6 +31,7 @@ import org.apache.cassandra.db.marshal.UUIDType;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.WriteTimeoutException;
 import org.apache.cassandra.io.FSWriteError;
+import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.util.FileUtils;
@@ -275,11 +276,11 @@ public class VerifyTest
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
 
 
-        try (RandomAccessFile file = new RandomAccessFile(sstable.descriptor.filenameFor(sstable.descriptor.digestComponent), "rw"))
+        try (RandomAccessFile file = new RandomAccessFile(sstable.descriptor.filenameFor(Component.DIGEST), "rw"))
         {
             Long correctChecksum = Long.valueOf(file.readLine());
     
-            writeChecksum(++correctChecksum, sstable.descriptor.filenameFor(sstable.descriptor.digestComponent));
+            writeChecksum(++correctChecksum, sstable.descriptor.filenameFor(Component.DIGEST));
         }
 
         try (Verifier verifier = new Verifier(cfs, sstable, false))
@@ -318,7 +319,7 @@ public class VerifyTest
             ChunkCache.instance.invalidateFile(sstable.getFilename());
 
         // Update the Digest to have the right Checksum
-        writeChecksum(simpleFullChecksum(sstable.getFilename()), sstable.descriptor.filenameFor(sstable.descriptor.digestComponent));
+        writeChecksum(simpleFullChecksum(sstable.getFilename()), sstable.descriptor.filenameFor(Component.DIGEST));
 
         try (Verifier verifier = new Verifier(cfs, sstable, false))
         {

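The digest filename now comes from the static Component.DIGEST constant rather than the per-descriptor digestComponent field, which (by inference from the lb-*-Digest.adler32 test fixtures removed elsewhere in this commit, not stated in this hunk) existed to distinguish legacy adler32 digests from crc32 ones. The new call shape:

    import org.apache.cassandra.io.sstable.Component;
    import org.apache.cassandra.io.sstable.format.SSTableReader;

    static String digestPath(SSTableReader sstable)
    {
        // was: sstable.descriptor.filenameFor(sstable.descriptor.digestComponent)
        return sstable.descriptor.filenameFor(Component.DIGEST);
    }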
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/db/commitlog/CommitLogDescriptorTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogDescriptorTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogDescriptorTest.java
index fdedafd..53c6769 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogDescriptorTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogDescriptorTest.java
@@ -106,10 +106,10 @@ public class CommitLogDescriptorTest
     public void testDescriptorPersistence() throws IOException
     {
         testDescriptorPersistence(new CommitLogDescriptor(11, null, neverEnabledEncryption));
-        testDescriptorPersistence(new CommitLogDescriptor(CommitLogDescriptor.VERSION_21, 13, null, neverEnabledEncryption));
-        testDescriptorPersistence(new CommitLogDescriptor(CommitLogDescriptor.VERSION_22, 15, null, neverEnabledEncryption));
-        testDescriptorPersistence(new CommitLogDescriptor(CommitLogDescriptor.VERSION_22, 17, new ParameterizedClass("LZ4Compressor", null), neverEnabledEncryption));
-        testDescriptorPersistence(new CommitLogDescriptor(CommitLogDescriptor.VERSION_22, 19,
+        testDescriptorPersistence(new CommitLogDescriptor(CommitLogDescriptor.current_version, 13, null, neverEnabledEncryption));
+        testDescriptorPersistence(new CommitLogDescriptor(CommitLogDescriptor.current_version, 15, null, neverEnabledEncryption));
+        testDescriptorPersistence(new CommitLogDescriptor(CommitLogDescriptor.current_version, 17, new ParameterizedClass("LZ4Compressor", null), neverEnabledEncryption));
+        testDescriptorPersistence(new CommitLogDescriptor(CommitLogDescriptor.current_version, 19,
                                                           new ParameterizedClass("StubbyCompressor", ImmutableMap.of("parameter1", "value1", "flag2", "55", "argument3", "null")
                                                           ), neverEnabledEncryption));
     }
@@ -122,7 +122,7 @@ public class CommitLogDescriptorTest
         for (int i=0; i<65535; ++i)
             params.put("key"+i, Integer.toString(i, 16));
         try {
-            CommitLogDescriptor desc = new CommitLogDescriptor(CommitLogDescriptor.VERSION_22,
+            CommitLogDescriptor desc = new CommitLogDescriptor(CommitLogDescriptor.current_version,
                                                                21,
                                                                new ParameterizedClass("LZ4Compressor", params),
                                                                neverEnabledEncryption);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java
index 5476d03..19305ac 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java
@@ -152,15 +152,9 @@ public class CommitLogTest
     }
 
     @Test
-    public void testRecoveryWithEmptyLog20() throws Exception
-    {
-        CommitLog.instance.recoverFiles(tmpFile(CommitLogDescriptor.VERSION_20));
-    }
-
-    @Test
     public void testRecoveryWithZeroLog() throws Exception
     {
-        testRecovery(new byte[10], null);
+        testRecovery(new byte[10], CommitLogReplayException.class);
     }
 
     @Test
@@ -174,7 +168,7 @@ public class CommitLogTest
     public void testRecoveryWithShortSize() throws Exception
     {
         runExpecting(() -> {
-            testRecovery(new byte[2], CommitLogDescriptor.VERSION_20);
+            testRecovery(new byte[2], CommitLogDescriptor.current_version);
             return null;
         }, CommitLogReplayException.class);
     }
@@ -531,8 +525,7 @@ public class CommitLogTest
     {
         ParameterizedClass commitLogCompression = DatabaseDescriptor.getCommitLogCompression();
         EncryptionContext encryptionContext = DatabaseDescriptor.getEncryptionContext();
-        runExpecting(() -> testRecovery(logData, CommitLogDescriptor.VERSION_20), expected);
-        runExpecting(() -> testRecovery(new CommitLogDescriptor(4, commitLogCompression, encryptionContext), logData), expected);
+        runExpecting(() -> testRecovery(logData, CommitLogDescriptor.current_version), expected);
     }
 
     @Test

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java
index d55b59f..e25d2f1 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java
@@ -98,83 +98,6 @@ public class CommitLogUpgradeTest
     }
 
     @Test
-    public void test20() throws Exception
-    {
-        testRestore(DATA_DIR + "2.0");
-    }
-
-    @Test
-    public void test21() throws Exception
-    {
-        testRestore(DATA_DIR + "2.1");
-    }
-
-    @Test
-    public void test22() throws Exception
-    {
-        testRestore(DATA_DIR + "2.2");
-    }
-
-    @Test
-    public void test22_LZ4() throws Exception
-    {
-        testRestore(DATA_DIR + "2.2-lz4");
-    }
-
-    @Test
-    public void test22_Snappy() throws Exception
-    {
-        testRestore(DATA_DIR + "2.2-snappy");
-    }
-
-    public void test22_truncated() throws Exception
-    {
-        testRestore(DATA_DIR + "2.2-lz4-truncated");
-    }
-
-    @Test(expected = CommitLogReplayException.class)
-    public void test22_bitrot() throws Exception
-    {
-        shouldBeKilled = true;
-        testRestore(DATA_DIR + "2.2-lz4-bitrot");
-    }
-
-    @Test
-    public void test22_bitrot_ignored() throws Exception
-    {
-        try
-        {
-            System.setProperty(CommitLogReplayer.IGNORE_REPLAY_ERRORS_PROPERTY, "true");
-            testRestore(DATA_DIR + "2.2-lz4-bitrot");
-        }
-        finally
-        {
-            System.clearProperty(CommitLogReplayer.IGNORE_REPLAY_ERRORS_PROPERTY);
-        }
-    }
-
-    @Test(expected = CommitLogReplayException.class)
-    public void test22_bitrot2() throws Exception
-    {
-        shouldBeKilled = true;
-        testRestore(DATA_DIR + "2.2-lz4-bitrot2");
-    }
-
-    @Test
-    public void test22_bitrot2_ignored() throws Exception
-    {
-        try
-        {
-            System.setProperty(CommitLogReplayer.IGNORE_REPLAY_ERRORS_PROPERTY, "true");
-            testRestore(DATA_DIR + "2.2-lz4-bitrot2");
-        }
-        finally
-        {
-            System.clearProperty(CommitLogReplayer.IGNORE_REPLAY_ERRORS_PROPERTY);
-        }
-    }
-
-    @Test
     public void test34_encrypted() throws Exception
     {
         testRestore(DATA_DIR + "3.4-encrypted");

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java b/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java
index a2e2754..d46d07a 100644
--- a/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java
@@ -164,9 +164,9 @@ public class AntiCompactionTest
     private SSTableReader writeFile(ColumnFamilyStore cfs, int count)
     {
         File dir = cfs.getDirectories().getDirectoryForNewSSTables();
-        String filename = cfs.getSSTablePath(dir);
+        Descriptor desc = cfs.newSSTableDescriptor(dir);
 
-        try (SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, filename, 0, 0, new SerializationHeader(true, cfm, cfm.partitionColumns(), EncodingStats.NO_STATS)))
+        try (SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, desc, 0, 0, new SerializationHeader(true, cfm, cfm.partitionColumns(), EncodingStats.NO_STATS)))
         {
             for (int i = 0; i < count; i++)
             {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java b/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java
index 595610e..b825f52 100644
--- a/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java
+++ b/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java
@@ -29,7 +29,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 
 import junit.framework.Assert;
-import org.apache.cassandra.MockSchema;
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.Schema;
@@ -65,8 +64,6 @@ public class RealTransactionsTest extends SchemaLoader
     @BeforeClass
     public static void setUp()
     {
-        MockSchema.cleanup();
-
         SchemaLoader.prepareServer();
         SchemaLoader.createKeyspace(KEYSPACE,
                                     KeyspaceParams.simple(1),
@@ -160,7 +157,7 @@ public class RealTransactionsTest extends SchemaLoader
             {
                 long lastCheckObsoletion = System.nanoTime();
                 File directory = txn.originals().iterator().next().descriptor.directory;
-                Descriptor desc = Descriptor.fromFilename(cfs.getSSTablePath(directory));
+                Descriptor desc = cfs.newSSTableDescriptor(directory);
                 CFMetaData metadata = Schema.instance.getCFMetaData(desc);
                 rewriter.switchWriter(SSTableWriter.create(metadata,
                                                            desc,


[06/11] cassandra git commit: Remove pre-3.0 compatibility code for 4.0

Posted by sl...@apache.org.
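For orientation before the 1,099-line deletion below, this is the top-level flow of the removed migrator, condensed from the migrate() body that follows (logging elided):

    public static void migrate()
    {
        // read keyspace/table/type/function metadata from the legacy system.schema_* tables
        Collection<Keyspace> keyspaces = readSchema();

        // already upgraded, or starting a fresh 3.0+ node: just drop the legacy tables
        if (keyspaces.isEmpty())
        {
            unloadLegacySchemaTables();
            return;
        }

        keyspaces.forEach(LegacySchemaMigrator::storeKeyspaceInNewSchemaTables); // rewrite into system_schema
        keyspaces.forEach(LegacySchemaMigrator::migrateBuiltIndexesForKeyspace); // carry over "index built" markers
        SchemaKeyspace.flush();       // persist the new tables before touching the old ones
        truncateLegacySchemaTables(); // then truncate (snapshotting) the legacy tables
        unloadLegacySchemaTables();   // and remove them from Schema entirely
    }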
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/schema/LegacySchemaMigrator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/schema/LegacySchemaMigrator.java b/src/java/org/apache/cassandra/schema/LegacySchemaMigrator.java
deleted file mode 100644
index d0fc151..0000000
--- a/src/java/org/apache/cassandra/schema/LegacySchemaMigrator.java
+++ /dev/null
@@ -1,1099 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.schema;
-
-import java.nio.ByteBuffer;
-import java.util.*;
-import java.util.stream.Collectors;
-
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.ImmutableList;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.cassandra.config.*;
-import org.apache.cassandra.cql3.ColumnIdentifier;
-import org.apache.cassandra.cql3.FieldIdentifier;
-import org.apache.cassandra.cql3.QueryProcessor;
-import org.apache.cassandra.cql3.UntypedResultSet;
-import org.apache.cassandra.cql3.functions.FunctionName;
-import org.apache.cassandra.cql3.functions.UDAggregate;
-import org.apache.cassandra.cql3.functions.UDFunction;
-import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.compaction.AbstractCompactionStrategy;
-import org.apache.cassandra.db.marshal.*;
-import org.apache.cassandra.db.rows.RowIterator;
-import org.apache.cassandra.db.rows.UnfilteredRowIterators;
-import org.apache.cassandra.exceptions.InvalidRequestException;
-import org.apache.cassandra.utils.FBUtilities;
-
-import static java.lang.String.format;
-import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
-import static org.apache.cassandra.utils.FBUtilities.fromJsonMap;
-
-/**
- * This majestic class performs migration from legacy (pre-3.0) system.schema_* schema tables to the new and glorious
- * system_schema keyspace.
- *
- * The goal is to not lose any information in the migration - including the timestamps.
- */
-@SuppressWarnings("deprecation")
-public final class LegacySchemaMigrator
-{
-    private LegacySchemaMigrator()
-    {
-    }
-
-    private static final Logger logger = LoggerFactory.getLogger(LegacySchemaMigrator.class);
-
-    static final List<CFMetaData> LegacySchemaTables =
-        ImmutableList.of(SystemKeyspace.LegacyKeyspaces,
-                         SystemKeyspace.LegacyColumnfamilies,
-                         SystemKeyspace.LegacyColumns,
-                         SystemKeyspace.LegacyTriggers,
-                         SystemKeyspace.LegacyUsertypes,
-                         SystemKeyspace.LegacyFunctions,
-                         SystemKeyspace.LegacyAggregates);
-
-    public static void migrate()
-    {
-        // read metadata from the legacy schema tables
-        Collection<Keyspace> keyspaces = readSchema();
-
-        // if already upgraded, or starting a new 3.0 node, abort early
-        if (keyspaces.isEmpty())
-        {
-            unloadLegacySchemaTables();
-            return;
-        }
-
-        // write metadata to the new schema tables
-        logger.info("Moving {} keyspaces from legacy schema tables to the new schema keyspace ({})",
-                    keyspaces.size(),
-                    SchemaConstants.SCHEMA_KEYSPACE_NAME);
-        keyspaces.forEach(LegacySchemaMigrator::storeKeyspaceInNewSchemaTables);
-        keyspaces.forEach(LegacySchemaMigrator::migrateBuiltIndexesForKeyspace);
-
-        // flush the new tables before truncating the old ones
-        SchemaKeyspace.flush();
-
-        // truncate the original tables (will be snapshotted now, and will have been snapshotted by pre-flight checks)
-        logger.info("Truncating legacy schema tables");
-        truncateLegacySchemaTables();
-
-        // remove legacy schema tables from Schema, so that their presence doesn't give the users any wrong ideas
-        unloadLegacySchemaTables();
-
-        logger.info("Completed migration of legacy schema tables");
-    }
-
-    private static void migrateBuiltIndexesForKeyspace(Keyspace keyspace)
-    {
-        keyspace.tables.forEach(LegacySchemaMigrator::migrateBuiltIndexesForTable);
-    }
-
-    private static void migrateBuiltIndexesForTable(Table table)
-    {
-        table.metadata.getIndexes().forEach((index) -> migrateIndexBuildStatus(table.metadata.ksName,
-                                                                               table.metadata.cfName,
-                                                                               index));
-    }
-
-    private static void migrateIndexBuildStatus(String keyspace, String table, IndexMetadata index)
-    {
-        if (SystemKeyspace.isIndexBuilt(keyspace, table + '.' + index.name))
-        {
-            SystemKeyspace.setIndexBuilt(keyspace, index.name);
-            SystemKeyspace.setIndexRemoved(keyspace, table + '.' + index.name);
-        }
-    }
-
-    static void unloadLegacySchemaTables()
-    {
-        KeyspaceMetadata systemKeyspace = Schema.instance.getKSMetaData(SchemaConstants.SYSTEM_KEYSPACE_NAME);
-
-        Tables systemTables = systemKeyspace.tables;
-        for (CFMetaData table : LegacySchemaTables)
-            systemTables = systemTables.without(table.cfName);
-
-        LegacySchemaTables.forEach(Schema.instance::unload);
-        LegacySchemaTables.forEach((cfm) -> org.apache.cassandra.db.Keyspace.openAndGetStore(cfm).invalidate());
-
-        Schema.instance.setKeyspaceMetadata(systemKeyspace.withSwapped(systemTables));
-    }
-
-    private static void truncateLegacySchemaTables()
-    {
-        LegacySchemaTables.forEach(table -> Schema.instance.getColumnFamilyStoreInstance(table.cfId).truncateBlocking());
-    }
-
-    private static void storeKeyspaceInNewSchemaTables(Keyspace keyspace)
-    {
-        logger.info("Migrating keyspace {}", keyspace);
-
-        Mutation.SimpleBuilder builder = SchemaKeyspace.makeCreateKeyspaceMutation(keyspace.name, keyspace.params, keyspace.timestamp);
-        for (Table table : keyspace.tables)
-            SchemaKeyspace.addTableToSchemaMutation(table.metadata, true, builder.timestamp(table.timestamp));
-
-        for (Type type : keyspace.types)
-            SchemaKeyspace.addTypeToSchemaMutation(type.metadata, builder.timestamp(type.timestamp));
-
-        for (Function function : keyspace.functions)
-            SchemaKeyspace.addFunctionToSchemaMutation(function.metadata, builder.timestamp(function.timestamp));
-
-        for (Aggregate aggregate : keyspace.aggregates)
-            SchemaKeyspace.addAggregateToSchemaMutation(aggregate.metadata, builder.timestamp(aggregate.timestamp));
-
-        builder.build().apply();
-    }
-
-    /*
-     * Read all keyspaces metadata (including nested tables, types, and functions), with their modification timestamps
-     */
-    private static Collection<Keyspace> readSchema()
-    {
-        String query = format("SELECT keyspace_name FROM %s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_KEYSPACES);
-        Collection<String> keyspaceNames = new ArrayList<>();
-        query(query).forEach(row -> keyspaceNames.add(row.getString("keyspace_name")));
-        keyspaceNames.removeAll(SchemaConstants.SYSTEM_KEYSPACE_NAMES);
-
-        Collection<Keyspace> keyspaces = new ArrayList<>();
-        keyspaceNames.forEach(name -> keyspaces.add(readKeyspace(name)));
-        return keyspaces;
-    }
-
-    private static Keyspace readKeyspace(String keyspaceName)
-    {
-        long timestamp = readKeyspaceTimestamp(keyspaceName);
-        KeyspaceParams params = readKeyspaceParams(keyspaceName);
-
-        Collection<Table> tables = readTables(keyspaceName);
-        Collection<Type> types = readTypes(keyspaceName);
-        Collection<Function> functions = readFunctions(keyspaceName);
-        Functions.Builder functionsBuilder = Functions.builder();
-        functions.forEach(udf -> functionsBuilder.add(udf.metadata));
-        Collection<Aggregate> aggregates = readAggregates(functionsBuilder.build(), keyspaceName);
-
-        return new Keyspace(timestamp, keyspaceName, params, tables, types, functions, aggregates);
-    }
-
-    /*
-     * Reading keyspace params
-     */
-
-    private static long readKeyspaceTimestamp(String keyspaceName)
-    {
-        String query = format("SELECT writeTime(durable_writes) AS timestamp FROM %s.%s WHERE keyspace_name = ?",
-                              SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                              SystemKeyspace.LEGACY_KEYSPACES);
-        return query(query, keyspaceName).one().getLong("timestamp");
-    }
-
-    private static KeyspaceParams readKeyspaceParams(String keyspaceName)
-    {
-        String query = format("SELECT * FROM %s.%s WHERE keyspace_name = ?",
-                              SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                              SystemKeyspace.LEGACY_KEYSPACES);
-        UntypedResultSet.Row row = query(query, keyspaceName).one();
-
-        boolean durableWrites = row.getBoolean("durable_writes");
-
-        Map<String, String> replication = new HashMap<>();
-        replication.putAll(fromJsonMap(row.getString("strategy_options")));
-        replication.put(ReplicationParams.CLASS, row.getString("strategy_class"));
-
-        return KeyspaceParams.create(durableWrites, replication);
-    }
-
-    /*
-     * Reading tables
-     */
-
-    private static Collection<Table> readTables(String keyspaceName)
-    {
-        String query = format("SELECT columnfamily_name FROM %s.%s WHERE keyspace_name = ?",
-                              SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                              SystemKeyspace.LEGACY_COLUMNFAMILIES);
-        Collection<String> tableNames = new ArrayList<>();
-        query(query, keyspaceName).forEach(row -> tableNames.add(row.getString("columnfamily_name")));
-
-        Collection<Table> tables = new ArrayList<>();
-        tableNames.forEach(name -> tables.add(readTable(keyspaceName, name)));
-        return tables;
-    }
-
-    private static Table readTable(String keyspaceName, String tableName)
-    {
-        long timestamp = readTableTimestamp(keyspaceName, tableName);
-        CFMetaData metadata = readTableMetadata(keyspaceName, tableName);
-        return new Table(timestamp, metadata);
-    }
-
-    private static long readTableTimestamp(String keyspaceName, String tableName)
-    {
-        String query = format("SELECT writeTime(type) AS timestamp FROM %s.%s WHERE keyspace_name = ? AND columnfamily_name = ?",
-                              SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                              SystemKeyspace.LEGACY_COLUMNFAMILIES);
-        return query(query, keyspaceName, tableName).one().getLong("timestamp");
-    }
-
-    private static CFMetaData readTableMetadata(String keyspaceName, String tableName)
-    {
-        String tableQuery = format("SELECT * FROM %s.%s WHERE keyspace_name = ? AND columnfamily_name = ?",
-                                   SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                                   SystemKeyspace.LEGACY_COLUMNFAMILIES);
-        UntypedResultSet.Row tableRow = query(tableQuery, keyspaceName, tableName).one();
-
-        String columnsQuery = format("SELECT * FROM %s.%s WHERE keyspace_name = ? AND columnfamily_name = ?",
-                                     SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                                     SystemKeyspace.LEGACY_COLUMNS);
-        UntypedResultSet columnRows = query(columnsQuery, keyspaceName, tableName);
-
-        String triggersQuery = format("SELECT * FROM %s.%s WHERE keyspace_name = ? AND columnfamily_name = ?",
-                                      SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                                      SystemKeyspace.LEGACY_TRIGGERS);
-        UntypedResultSet triggerRows = query(triggersQuery, keyspaceName, tableName);
-
-        return decodeTableMetadata(tableRow, columnRows, triggerRows);
-    }
-
-    private static CFMetaData decodeTableMetadata(UntypedResultSet.Row tableRow,
-                                                  UntypedResultSet columnRows,
-                                                  UntypedResultSet triggerRows)
-    {
-        String ksName = tableRow.getString("keyspace_name");
-        String cfName = tableRow.getString("columnfamily_name");
-
-        AbstractType<?> rawComparator = TypeParser.parse(tableRow.getString("comparator"));
-        AbstractType<?> subComparator = tableRow.has("subcomparator") ? TypeParser.parse(tableRow.getString("subcomparator")) : null;
-
-        boolean isSuper = "super".equals(tableRow.getString("type").toLowerCase(Locale.ENGLISH));
-        boolean isCompound = rawComparator instanceof CompositeType || isSuper;
-
-        /*
-         * Determine whether or not the table is *really* dense
- * We cannot trust is_dense value of true (see CASSANDRA-11502, which fixed the issue for 2.2 only, and not retroactively),
-         * but we can trust is_dense value of false.
-         */
-        Boolean rawIsDense = tableRow.has("is_dense") ? tableRow.getBoolean("is_dense") : null;
-        boolean isDense;
-        if (rawIsDense != null && !rawIsDense)
-            isDense = false;
-        else
-            isDense = calculateIsDense(rawComparator, columnRows);
-
-        // now, if switched to sparse, remove the redundant compact_value column and the last clustering column,
-        // directly copying CASSANDRA-11502 logic. See CASSANDRA-11315.
-        Iterable<UntypedResultSet.Row> filteredColumnRows = !isDense && (rawIsDense == null || rawIsDense)
-                                                          ? filterOutRedundantRowsForSparse(columnRows, isSuper, isCompound)
-                                                          : columnRows;
-
-        // We don't really use the default validator, but as we have it for backward compatibility, we use it to know if it's a counter table
-        AbstractType<?> defaultValidator = TypeParser.parse(tableRow.getString("default_validator"));
-        boolean isCounter = defaultValidator instanceof CounterColumnType;
-
-        /*
-         * With CASSANDRA-5202 we stopped inferring the cf id from the combination of keyspace/table names,
-         * and started storing the generated uuids in system.schema_columnfamilies.
-         *
-         * In 3.0 we SHOULD NOT see tables like that (2.0-created, non-upgraded).
- * But on the off chance that we do, we generate the deterministic uuid here.
-         */
-        UUID cfId = tableRow.has("cf_id")
-                  ? tableRow.getUUID("cf_id")
-                  : CFMetaData.generateLegacyCfId(ksName, cfName);
-
-        boolean isCQLTable = !isSuper && !isDense && isCompound;
-        boolean isStaticCompactTable = !isDense && !isCompound;
-
-        // Internally, compact tables have a specific layout, see CompactTables. But when upgrading from
-        // previous versions, they may not have the expected schema, so detect if we need to upgrade and do
-        // it in createColumnsFromColumnRows.
-        // We can remove this once we don't support upgrade from versions < 3.0.
-        boolean needsUpgrade = !isCQLTable && checkNeedsUpgrade(filteredColumnRows, isSuper, isStaticCompactTable);
-
-        List<ColumnDefinition> columnDefs = createColumnsFromColumnRows(filteredColumnRows,
-                                                                        ksName,
-                                                                        cfName,
-                                                                        rawComparator,
-                                                                        subComparator,
-                                                                        isSuper,
-                                                                        isCQLTable,
-                                                                        isStaticCompactTable,
-                                                                        needsUpgrade);
-
-        if (needsUpgrade)
-        {
-            addDefinitionForUpgrade(columnDefs,
-                                    ksName,
-                                    cfName,
-                                    isStaticCompactTable,
-                                    isSuper,
-                                    rawComparator,
-                                    subComparator,
-                                    defaultValidator);
-        }
-
-        CFMetaData cfm = CFMetaData.create(ksName,
-                                           cfName,
-                                           cfId,
-                                           isDense,
-                                           isCompound,
-                                           isSuper,
-                                           isCounter,
-                                           false, // legacy schema did not contain views
-                                           columnDefs,
-                                           DatabaseDescriptor.getPartitioner());
-
-        Indexes indexes = createIndexesFromColumnRows(cfm,
-                                                      filteredColumnRows,
-                                                      ksName,
-                                                      cfName,
-                                                      rawComparator,
-                                                      subComparator,
-                                                      isSuper,
-                                                      isCQLTable,
-                                                      isStaticCompactTable,
-                                                      needsUpgrade);
-        cfm.indexes(indexes);
-
-        if (tableRow.has("dropped_columns"))
-            addDroppedColumns(cfm, rawComparator, tableRow.getMap("dropped_columns", UTF8Type.instance, LongType.instance));
-
-        return cfm.params(decodeTableParams(tableRow))
-                  .triggers(createTriggersFromTriggerRows(triggerRows));
-    }
-
-    /*
-     * We call a CF dense when each component of the comparator is a clustering column, i.e. when no
-     * component is used to store regular column names. In other words, non-composite static "thrift"
-     * and CQL3 CFs are *not* dense.
-     * We save whether the table is dense or not during table creation through CQL, but we don't have this
-     * information for tables created through thrift, nor for tables created prior to CASSANDRA-7744, so this
-     * method does its best to infer whether the table is dense or not based on other elements.
-     */
-    private static boolean calculateIsDense(AbstractType<?> comparator, UntypedResultSet columnRows)
-    {
-        /*
-         * As said above, this method is only here because we need to deal with thrift upgrades.
-         * Once a CF has been "upgraded", i.e. once we've rebuilt and saved its CQL3 metadata at least once,
-         * we'll have saved the "is_dense" value and will be good to go.
-         *
-         * But non-upgraded thrift CFs (and pre-7744 CFs) will have no value for "is_dense", so we need
-         * to infer that information without relying on it in that case. For the most part this is
-         * easy: a CF that has at least one REGULAR definition is not dense. But the subtlety is that not
-         * having a REGULAR definition does not necessarily mean dense, because a CQL3 definition may have
-         * only the PRIMARY KEY defined.
-         *
-         * So we need to recognize those special-case CQL3 tables with only a primary key. If we have some
-         * clustering columns, we're fine, as said above. The only problem is that we cannot decide for sure
-         * whether a CF with neither REGULAR columns nor CLUSTERING_COLUMN definitions is meant to be dense,
-         * or whether it has been created in CQL3 by, say:
-         *    CREATE TABLE test (k int PRIMARY KEY)
-         * in which case it should not be dense. However, we can limit our margin of error by assuming we are
-         * in the latter case only if the comparator is exactly CompositeType(UTF8Type).
-         */
-        for (UntypedResultSet.Row columnRow : columnRows)
-            if ("regular".equals(columnRow.getString("type")))
-                return false;
-
-        int maxClusteringIdx = -1;
-        for (UntypedResultSet.Row columnRow : columnRows)
-            if ("clustering_key".equals(columnRow.getString("type")))
-                maxClusteringIdx = Math.max(maxClusteringIdx, columnRow.has("component_index") ? columnRow.getInt("component_index") : 0);
-
-        return maxClusteringIdx >= 0
-             ? maxClusteringIdx == comparator.componentsCount() - 1
-             : !isCQL3OnlyPKComparator(comparator);
-    }
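-    /*
-     * Illustrative summary of the inference above (hypothetical schemas, for exposition only):
-     *   - at least one REGULAR column                               -> not dense
-     *   - clustering columns spanning the whole comparator          -> dense (thrift dynamic CF)
-     *   - no REGULAR/CLUSTERING and comparator = CompositeType(UTF8Type)
-     *                                                               -> assumed CQL3 PK-only, not dense
-     */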
-
-    private static Iterable<UntypedResultSet.Row> filterOutRedundantRowsForSparse(UntypedResultSet columnRows, boolean isSuper, boolean isCompound)
-    {
-        Collection<UntypedResultSet.Row> filteredRows = new ArrayList<>();
-        for (UntypedResultSet.Row columnRow : columnRows)
-        {
-            String kind = columnRow.getString("type");
-
-            if ("compact_value".equals(kind))
-                continue;
-
-            if ("clustering_key".equals(kind))
-            {
-                int position = columnRow.has("component_index") ? columnRow.getInt("component_index") : 0;
-                if (isSuper && position != 0)
-                    continue;
-
-                if (!isSuper && !isCompound)
-                    continue;
-            }
-
-            filteredRows.add(columnRow);
-        }
-
-        return filteredRows;
-    }
-
-    private static boolean isCQL3OnlyPKComparator(AbstractType<?> comparator)
-    {
-        if (!(comparator instanceof CompositeType))
-            return false;
-
-        CompositeType ct = (CompositeType)comparator;
-        return ct.types.size() == 1 && ct.types.get(0) instanceof UTF8Type;
-    }
-
-    private static TableParams decodeTableParams(UntypedResultSet.Row row)
-    {
-        TableParams.Builder params = TableParams.builder();
-
-        params.readRepairChance(row.getDouble("read_repair_chance"))
-              .dcLocalReadRepairChance(row.getDouble("local_read_repair_chance"))
-              .gcGraceSeconds(row.getInt("gc_grace_seconds"));
-
-        if (row.has("comment"))
-            params.comment(row.getString("comment"));
-
-        if (row.has("memtable_flush_period_in_ms"))
-            params.memtableFlushPeriodInMs(row.getInt("memtable_flush_period_in_ms"));
-
-        params.caching(CachingParams.fromMap(fromJsonMap(row.getString("caching"))));
-
-        if (row.has("default_time_to_live"))
-            params.defaultTimeToLive(row.getInt("default_time_to_live"));
-
-        if (row.has("speculative_retry"))
-            params.speculativeRetry(SpeculativeRetryParam.fromString(row.getString("speculative_retry")));
-
-        Map<String, String> compressionParameters = fromJsonMap(row.getString("compression_parameters"));
-        String crcCheckChance = compressionParameters.remove("crc_check_chance");
-        // crc_check_chance was promoted from a compression property to a top-level property
-        if (crcCheckChance != null)
-            params.crcCheckChance(Double.parseDouble(crcCheckChance));
-
-        params.compression(CompressionParams.fromMap(compressionParameters));
-
-        params.compaction(compactionFromRow(row));
-
-        if (row.has("min_index_interval"))
-            params.minIndexInterval(row.getInt("min_index_interval"));
-
-        if (row.has("max_index_interval"))
-            params.maxIndexInterval(row.getInt("max_index_interval"));
-
-        if (row.has("bloom_filter_fp_chance"))
-            params.bloomFilterFpChance(row.getDouble("bloom_filter_fp_chance"));
-
-        return params.build();
-    }
-
-    /*
-     * This method is needed to migrate max_compaction_threshold and min_compaction_threshold
-     * into the compaction options map, where they belong.
-     *
-     * We must use reflection to validate the options because not every compaction strategy respects and supports
-     * the threshold params (LCS doesn't, STCS and DTCS do).
-     */
-    @SuppressWarnings("unchecked")
-    private static CompactionParams compactionFromRow(UntypedResultSet.Row row)
-    {
-        Class<? extends AbstractCompactionStrategy> klass =
-            CFMetaData.createCompactionStrategy(row.getString("compaction_strategy_class"));
-        Map<String, String> options = fromJsonMap(row.getString("compaction_strategy_options"));
-
-        int minThreshold = row.getInt("min_compaction_threshold");
-        int maxThreshold = row.getInt("max_compaction_threshold");
-
-        Map<String, String> optionsWithThresholds = new HashMap<>(options);
-        optionsWithThresholds.putIfAbsent(CompactionParams.Option.MIN_THRESHOLD.toString(), Integer.toString(minThreshold));
-        optionsWithThresholds.putIfAbsent(CompactionParams.Option.MAX_THRESHOLD.toString(), Integer.toString(maxThreshold));
-
-        try
-        {
-            Map<String, String> unrecognizedOptions =
-                (Map<String, String>) klass.getMethod("validateOptions", Map.class).invoke(null, optionsWithThresholds);
-
-            if (unrecognizedOptions.isEmpty())
-                options = optionsWithThresholds;
-        }
-        catch (Exception e)
-        {
-            throw new RuntimeException(e);
-        }
-
-        return CompactionParams.create(klass, options);
-    }
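-    /*
-     * Hypothetical example of the migration above (values for exposition only):
-     *   legacy row:   compaction_strategy_class = SizeTieredCompactionStrategy,
-     *                 compaction_strategy_options = {"bucket_high": "1.5"},
-     *                 min_compaction_threshold = 4, max_compaction_threshold = 32
-     *   migrated map: {"bucket_high": "1.5", "min_threshold": "4", "max_threshold": "32"}
-     * If validateOptions() reports the thresholds as unrecognized (as for LCS), they are left out.
-     */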
-
-    // Should only be called on compact tables
-    private static boolean checkNeedsUpgrade(Iterable<UntypedResultSet.Row> defs, boolean isSuper, boolean isStaticCompactTable)
-    {
-        if (isSuper)
-        {
-            // Check if we've added the "supercolumn map" column yet or not
-            for (UntypedResultSet.Row row : defs)
-                if (row.getString("column_name").isEmpty())
-                    return false;
-            return true;
-        }
-
-        // For static compact tables, we need to upgrade if the regular definitions haven't been converted to static yet,
-        // i.e. if we don't yet have a static definition.
-        if (isStaticCompactTable)
-            return !hasKind(defs, ColumnDefinition.Kind.STATIC);
-
-        // For dense compact tables, we need to upgrade if we don't have a regular (compact value) definition
-        return !hasRegularColumns(defs);
-    }
-
-    private static boolean hasRegularColumns(Iterable<UntypedResultSet.Row> columnRows)
-    {
-        for (UntypedResultSet.Row row : columnRows)
-        {
-            /*
-             * We need to special-case and ignore the empty compact value column (pre-3.0, COMPACT STORAGE, primary-key-only tables),
-             * since deserializeKind() will otherwise just return a REGULAR.
-             * We want the proper EmptyType regular column to be added by addDefinitionForUpgrade(), so we need
-             * checkNeedsUpgrade() to return true in this case.
-             * See CASSANDRA-9874.
-             */
-            if (isEmptyCompactValueColumn(row))
-                return false;
-
-            if (deserializeKind(row.getString("type")) == ColumnDefinition.Kind.REGULAR)
-                return true;
-        }
-
-        return false;
-    }
-
-    private static boolean isEmptyCompactValueColumn(UntypedResultSet.Row row)
-    {
-        return "compact_value".equals(row.getString("type")) && row.getString("column_name").isEmpty();
-    }
-
-    private static void addDefinitionForUpgrade(List<ColumnDefinition> defs,
-                                                String ksName,
-                                                String cfName,
-                                                boolean isStaticCompactTable,
-                                                boolean isSuper,
-                                                AbstractType<?> rawComparator,
-                                                AbstractType<?> subComparator,
-                                                AbstractType<?> defaultValidator)
-    {
-        CompactTables.DefaultNames names = CompactTables.defaultNameGenerator(defs);
-
-        if (isSuper)
-        {
-            defs.add(ColumnDefinition.regularDef(ksName, cfName, CompactTables.SUPER_COLUMN_MAP_COLUMN_STR, MapType.getInstance(subComparator, defaultValidator, true)));
-        }
-        else if (isStaticCompactTable)
-        {
-            defs.add(ColumnDefinition.clusteringDef(ksName, cfName, names.defaultClusteringName(), rawComparator, 0));
-            defs.add(ColumnDefinition.regularDef(ksName, cfName, names.defaultCompactValueName(), defaultValidator));
-        }
-        else
-        {
-            // For dense compact tables, we get here if we don't have a compact value column, in which case we should add it
-            // (we use EmptyType to recognize that the compact value was not declared by the user (see CreateTableStatement too))
-            defs.add(ColumnDefinition.regularDef(ksName, cfName, names.defaultCompactValueName(), EmptyType.instance));
-        }
-    }
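-    /*
-     * For exposition, the definitions added above (generated names, hence hypothetical):
-     *   super column family:  a regular "supercolumn map" column of map<subComparator, defaultValidator>
-     *   static compact table: a generated clustering column (rawComparator) and a compact value column
-     *   dense compact table:  a generated compact value column of EmptyType
-     */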
-
-    private static boolean hasKind(Iterable<UntypedResultSet.Row> defs, ColumnDefinition.Kind kind)
-    {
-        for (UntypedResultSet.Row row : defs)
-            if (deserializeKind(row.getString("type")) == kind)
-                return true;
-
-        return false;
-    }
-
-    /*
-     * Prior to 3.0 we did not store the type of dropped columns, relying on all collection info being
-     * present in the comparator, forever. That allowed us to perform certain validations in AlterTableStatement
-     * (namely, forbidding the re-adding of a collection column with the same name but an incompatible type).
-     *
-     * In 3.0, we no longer preserve the original comparator, and reconstruct it from the columns instead. That means
-     * that we should now preserve the type of the dropped columns and, during migration, fetch the types from
-     * the original comparator if necessary.
-     */
-    private static void addDroppedColumns(CFMetaData cfm, AbstractType<?> comparator, Map<String, Long> droppedTimes)
-    {
-        AbstractType<?> last = comparator.getComponents().get(comparator.componentsCount() - 1);
-        Map<ByteBuffer, CollectionType> collections = last instanceof ColumnToCollectionType
-                                                    ? ((ColumnToCollectionType) last).defined
-                                                    : Collections.emptyMap();
-
-        for (Map.Entry<String, Long> entry : droppedTimes.entrySet())
-        {
-            String name = entry.getKey();
-            ByteBuffer nameBytes = UTF8Type.instance.decompose(name);
-            long time = entry.getValue();
-
-            AbstractType<?> type = collections.containsKey(nameBytes)
-                                 ? collections.get(nameBytes)
-                                 : BytesType.instance;
-
-            cfm.getDroppedColumns().put(nameBytes, new CFMetaData.DroppedColumn(name, type, time, ColumnDefinition.Kind.REGULAR));
-        }
-    }
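-    /*
-     * Illustrative example (column name hypothetical): a dropped column "m" that appears in the original
-     * ColumnToCollectionType as map<text, int> is recorded with that map type; any other dropped column
-     * falls back to BytesType, since its original type is unrecoverable.
-     */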
-
-    private static List<ColumnDefinition> createColumnsFromColumnRows(Iterable<UntypedResultSet.Row> rows,
-                                                                      String keyspace,
-                                                                      String table,
-                                                                      AbstractType<?> rawComparator,
-                                                                      AbstractType<?> rawSubComparator,
-                                                                      boolean isSuper,
-                                                                      boolean isCQLTable,
-                                                                      boolean isStaticCompactTable,
-                                                                      boolean needsUpgrade)
-    {
-        List<ColumnDefinition> columns = new ArrayList<>();
-
-        for (UntypedResultSet.Row row : rows)
-        {
-            // Skip the empty compact value column and let addDefinitionForUpgrade() re-add the proper REGULAR one.
-            if (isEmptyCompactValueColumn(row))
-                continue;
-
-            columns.add(createColumnFromColumnRow(row,
-                                                  keyspace,
-                                                  table,
-                                                  rawComparator,
-                                                  rawSubComparator,
-                                                  isSuper,
-                                                  isCQLTable,
-                                                  isStaticCompactTable,
-                                                  needsUpgrade));
-        }
-
-        return columns;
-    }
-
-    private static ColumnDefinition createColumnFromColumnRow(UntypedResultSet.Row row,
-                                                              String keyspace,
-                                                              String table,
-                                                              AbstractType<?> rawComparator,
-                                                              AbstractType<?> rawSubComparator,
-                                                              boolean isSuper,
-                                                              boolean isCQLTable,
-                                                              boolean isStaticCompactTable,
-                                                              boolean needsUpgrade)
-    {
-        String rawKind = row.getString("type");
-
-        ColumnDefinition.Kind kind = deserializeKind(rawKind);
-        if (needsUpgrade && isStaticCompactTable && kind == ColumnDefinition.Kind.REGULAR)
-            kind = ColumnDefinition.Kind.STATIC;
-
-        int componentIndex = ColumnDefinition.NO_POSITION;
-        // Note that the component_index is not useful for non-primary-key parts (it never really was, in fact,
-        // since there is no particular ordering of non-PK columns; we only used it as a simplification, but
-        // that's no longer needed)
-        if (kind.isPrimaryKeyKind())
-            // We used to not have a component index when there was a single partition key; we don't anymore (#10491)
-            componentIndex = row.has("component_index") ? row.getInt("component_index") : 0;
-
-        // Note: we save the column name as a string, but we should not assume that it is a UTF8 name, so
-        // we need to use the comparator's fromString() method
-        AbstractType<?> comparator = isCQLTable
-                                     ? UTF8Type.instance
-                                     : CompactTables.columnDefinitionComparator(rawKind, isSuper, rawComparator, rawSubComparator);
-        ColumnIdentifier name = ColumnIdentifier.getInterned(comparator.fromString(row.getString("column_name")), comparator);
-
-        AbstractType<?> validator = parseType(row.getString("validator"));
-
-        // In the 2.x schema we didn't store UDTs with a FrozenType wrapper because they were implicitly frozen.  After
-        // CASSANDRA-7423 (non-frozen UDTs), this is no longer true, so we need to freeze UDTs and nested freezable
-        // types (UDTs and collections) to properly migrate the schema.  See CASSANDRA-11609 and CASSANDRA-11613.
-        if (validator.isUDT() && validator.isMultiCell())
-            validator = validator.freeze();
-        else
-            validator = validator.freezeNestedMulticellTypes();
-
-        return new ColumnDefinition(keyspace, table, name, validator, componentIndex, kind);
-    }
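-    /*
-     * Hypothetical example of the freezing step above (type names for exposition only):
-     *   2.x validator UserType(ks, t, ...)           -> frozen<t>
-     *   2.x validator ListType(UserType(ks, t, ...)) -> list<frozen<t>>
-     * Top-level collections stay multi-cell; only nested freezable types get frozen.
-     */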
-
-    private static Indexes createIndexesFromColumnRows(CFMetaData cfm,
-                                                       Iterable<UntypedResultSet.Row> rows,
-                                                       String keyspace,
-                                                       String table,
-                                                       AbstractType<?> rawComparator,
-                                                       AbstractType<?> rawSubComparator,
-                                                       boolean isSuper,
-                                                       boolean isCQLTable,
-                                                       boolean isStaticCompactTable,
-                                                       boolean needsUpgrade)
-    {
-        Indexes.Builder indexes = Indexes.builder();
-
-        for (UntypedResultSet.Row row : rows)
-        {
-            IndexMetadata.Kind kind = null;
-            if (row.has("index_type"))
-                kind = IndexMetadata.Kind.valueOf(row.getString("index_type"));
-
-            if (kind == null)
-                continue;
-
-            Map<String, String> indexOptions = null;
-            if (row.has("index_options"))
-                indexOptions = fromJsonMap(row.getString("index_options"));
-
-            if (row.has("index_name"))
-            {
-                String indexName = row.getString("index_name");
-
-                ColumnDefinition column = createColumnFromColumnRow(row,
-                                                                    keyspace,
-                                                                    table,
-                                                                    rawComparator,
-                                                                    rawSubComparator,
-                                                                    isSuper,
-                                                                    isCQLTable,
-                                                                    isStaticCompactTable,
-                                                                    needsUpgrade);
-
-                indexes.add(IndexMetadata.fromLegacyMetadata(cfm, column, indexName, kind, indexOptions));
-            }
-            else
-            {
-                logger.error("Failed to find index name for legacy migration of index on {}.{}", keyspace, table);
-            }
-        }
-
-        return indexes.build();
-    }
-
-    private static ColumnDefinition.Kind deserializeKind(String kind)
-    {
-        if ("clustering_key".equalsIgnoreCase(kind))
-            return ColumnDefinition.Kind.CLUSTERING;
-
-        if ("compact_value".equalsIgnoreCase(kind))
-            return ColumnDefinition.Kind.REGULAR;
-
-        return Enum.valueOf(ColumnDefinition.Kind.class, kind.toUpperCase());
-    }
-
-    private static Triggers createTriggersFromTriggerRows(UntypedResultSet rows)
-    {
-        Triggers.Builder triggers = org.apache.cassandra.schema.Triggers.builder();
-        rows.forEach(row -> triggers.add(createTriggerFromTriggerRow(row)));
-        return triggers.build();
-    }
-
-    private static TriggerMetadata createTriggerFromTriggerRow(UntypedResultSet.Row row)
-    {
-        String name = row.getString("trigger_name");
-        String classOption = row.getTextMap("trigger_options").get("class");
-        return new TriggerMetadata(name, classOption);
-    }
-
-    /*
-     * Reading user types
-     */
-
-    private static Collection<Type> readTypes(String keyspaceName)
-    {
-        String query = format("SELECT type_name FROM %s.%s WHERE keyspace_name = ?",
-                              SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                              SystemKeyspace.LEGACY_USERTYPES);
-        Collection<String> typeNames = new ArrayList<>();
-        query(query, keyspaceName).forEach(row -> typeNames.add(row.getString("type_name")));
-
-        Collection<Type> types = new ArrayList<>();
-        typeNames.forEach(name -> types.add(readType(keyspaceName, name)));
-        return types;
-    }
-
-    private static Type readType(String keyspaceName, String typeName)
-    {
-        long timestamp = readTypeTimestamp(keyspaceName, typeName);
-        UserType metadata = readTypeMetadata(keyspaceName, typeName);
-        return new Type(timestamp, metadata);
-    }
-
-    /*
-     * Unfortunately there is not a single REGULAR column in system.schema_usertypes, so, annoyingly, we
-     * cannot use the writeTime() CQL function and must resort to a lower-level read.
-     */
-    private static long readTypeTimestamp(String keyspaceName, String typeName)
-    {
-        ColumnFamilyStore store = org.apache.cassandra.db.Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME)
-                                                                  .getColumnFamilyStore(SystemKeyspace.LEGACY_USERTYPES);
-
-        ClusteringComparator comparator = store.metadata.comparator;
-        Slices slices = Slices.with(comparator, Slice.make(comparator, typeName));
-        int nowInSec = FBUtilities.nowInSeconds();
-        DecoratedKey key = store.metadata.decorateKey(AsciiType.instance.fromString(keyspaceName));
-        SinglePartitionReadCommand command = SinglePartitionReadCommand.create(store.metadata, nowInSec, key, slices);
-
-        try (ReadExecutionController controller = command.executionController();
-             RowIterator partition = UnfilteredRowIterators.filter(command.queryMemtableAndDisk(store, controller), nowInSec))
-        {
-            return partition.next().primaryKeyLivenessInfo().timestamp();
-        }
-    }
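-    /*
-     * Contrast with readFunctionTimestamp() below: system.schema_functions does have a REGULAR column
-     * (return_type), so a plain "SELECT writeTime(return_type) ..." suffices there.
-     */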
-
-    private static UserType readTypeMetadata(String keyspaceName, String typeName)
-    {
-        String query = format("SELECT * FROM %s.%s WHERE keyspace_name = ? AND type_name = ?",
-                              SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                              SystemKeyspace.LEGACY_USERTYPES);
-        UntypedResultSet.Row row = query(query, keyspaceName, typeName).one();
-
-        List<FieldIdentifier> names =
-            row.getList("field_names", UTF8Type.instance)
-               .stream()
-               .map(t -> FieldIdentifier.forInternalString(t))
-               .collect(Collectors.toList());
-
-        List<AbstractType<?>> types =
-            row.getList("field_types", UTF8Type.instance)
-               .stream()
-               .map(LegacySchemaMigrator::parseType)
-               .collect(Collectors.toList());
-
-        return new UserType(keyspaceName, bytes(typeName), names, types, true);
-    }
-
-    /*
-     * Reading UDFs
-     */
-
-    private static Collection<Function> readFunctions(String keyspaceName)
-    {
-        String query = format("SELECT function_name, signature FROM %s.%s WHERE keyspace_name = ?",
-                              SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                              SystemKeyspace.LEGACY_FUNCTIONS);
-        HashMultimap<String, List<String>> functionSignatures = HashMultimap.create();
-        query(query, keyspaceName).forEach(row -> functionSignatures.put(row.getString("function_name"), row.getList("signature", UTF8Type.instance)));
-
-        Collection<Function> functions = new ArrayList<>();
-        functionSignatures.entries().forEach(pair -> functions.add(readFunction(keyspaceName, pair.getKey(), pair.getValue())));
-        return functions;
-    }
-
-    private static Function readFunction(String keyspaceName, String functionName, List<String> signature)
-    {
-        long timestamp = readFunctionTimestamp(keyspaceName, functionName, signature);
-        UDFunction metadata = readFunctionMetadata(keyspaceName, functionName, signature);
-        return new Function(timestamp, metadata);
-    }
-
-    private static long readFunctionTimestamp(String keyspaceName, String functionName, List<String> signature)
-    {
-        String query = format("SELECT writeTime(return_type) AS timestamp " +
-                              "FROM %s.%s " +
-                              "WHERE keyspace_name = ? AND function_name = ? AND signature = ?",
-                              SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                              SystemKeyspace.LEGACY_FUNCTIONS);
-        return query(query, keyspaceName, functionName, signature).one().getLong("timestamp");
-    }
-
-    private static UDFunction readFunctionMetadata(String keyspaceName, String functionName, List<String> signature)
-    {
-        String query = format("SELECT * FROM %s.%s WHERE keyspace_name = ? AND function_name = ? AND signature = ?",
-                              SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                              SystemKeyspace.LEGACY_FUNCTIONS);
-        UntypedResultSet.Row row = query(query, keyspaceName, functionName, signature).one();
-
-        FunctionName name = new FunctionName(keyspaceName, functionName);
-
-        List<ColumnIdentifier> argNames = new ArrayList<>();
-        if (row.has("argument_names"))
-            for (String arg : row.getList("argument_names", UTF8Type.instance))
-                argNames.add(new ColumnIdentifier(arg, true));
-
-        List<AbstractType<?>> argTypes = new ArrayList<>();
-        if (row.has("argument_types"))
-            for (String type : row.getList("argument_types", UTF8Type.instance))
-                argTypes.add(parseType(type));
-
-        AbstractType<?> returnType = parseType(row.getString("return_type"));
-
-        String language = row.getString("language");
-        String body = row.getString("body");
-        boolean calledOnNullInput = row.getBoolean("called_on_null_input");
-
-        try
-        {
-            return UDFunction.create(name, argNames, argTypes, returnType, calledOnNullInput, language, body);
-        }
-        catch (InvalidRequestException e)
-        {
-            return UDFunction.createBrokenFunction(name, argNames, argTypes, returnType, calledOnNullInput, language, body, e);
-        }
-    }
-
-    /*
-     * Reading UDAs
-     */
-
-    private static Collection<Aggregate> readAggregates(Functions functions, String keyspaceName)
-    {
-        String query = format("SELECT aggregate_name, signature FROM %s.%s WHERE keyspace_name = ?",
-                              SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                              SystemKeyspace.LEGACY_AGGREGATES);
-        HashMultimap<String, List<String>> aggregateSignatures = HashMultimap.create();
-        query(query, keyspaceName).forEach(row -> aggregateSignatures.put(row.getString("aggregate_name"), row.getList("signature", UTF8Type.instance)));
-
-        Collection<Aggregate> aggregates = new ArrayList<>();
-        aggregateSignatures.entries().forEach(pair -> aggregates.add(readAggregate(functions, keyspaceName, pair.getKey(), pair.getValue())));
-        return aggregates;
-    }
-
-    private static Aggregate readAggregate(Functions functions, String keyspaceName, String aggregateName, List<String> signature)
-    {
-        long timestamp = readAggregateTimestamp(keyspaceName, aggregateName, signature);
-        UDAggregate metadata = readAggregateMetadata(functions, keyspaceName, aggregateName, signature);
-        return new Aggregate(timestamp, metadata);
-    }
-
-    private static long readAggregateTimestamp(String keyspaceName, String aggregateName, List<String> signature)
-    {
-        String query = format("SELECT writeTime(return_type) AS timestamp " +
-                              "FROM %s.%s " +
-                              "WHERE keyspace_name = ? AND aggregate_name = ? AND signature = ?",
-                              SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                              SystemKeyspace.LEGACY_AGGREGATES);
-        return query(query, keyspaceName, aggregateName, signature).one().getLong("timestamp");
-    }
-
-    private static UDAggregate readAggregateMetadata(Functions functions, String keyspaceName, String functionName, List<String> signature)
-    {
-        String query = format("SELECT * FROM %s.%s WHERE keyspace_name = ? AND aggregate_name = ? AND signature = ?",
-                              SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                              SystemKeyspace.LEGACY_AGGREGATES);
-        UntypedResultSet.Row row = query(query, keyspaceName, functionName, signature).one();
-
-        FunctionName name = new FunctionName(keyspaceName, functionName);
-
-        List<String> types = row.getList("argument_types", UTF8Type.instance);
-
-        List<AbstractType<?>> argTypes = new ArrayList<>();
-        if (types != null)
-        {
-            argTypes = new ArrayList<>(types.size());
-            for (String type : types)
-                argTypes.add(parseType(type));
-        }
-
-        AbstractType<?> returnType = parseType(row.getString("return_type"));
-
-        FunctionName stateFunc = new FunctionName(keyspaceName, row.getString("state_func"));
-        AbstractType<?> stateType = parseType(row.getString("state_type"));
-        FunctionName finalFunc = row.has("final_func") ? new FunctionName(keyspaceName, row.getString("final_func")) : null;
-        ByteBuffer initcond = row.has("initcond") ? row.getBytes("initcond") : null;
-
-        try
-        {
-            return UDAggregate.create(functions, name, argTypes, returnType, stateFunc, finalFunc, stateType, initcond);
-        }
-        catch (InvalidRequestException reason)
-        {
-            return UDAggregate.createBroken(name, argTypes, returnType, initcond, reason);
-        }
-    }
-
-    private static UntypedResultSet query(String query, Object... values)
-    {
-        return QueryProcessor.executeOnceInternal(query, values);
-    }
-
-    private static AbstractType<?> parseType(String str)
-    {
-        return TypeParser.parse(str);
-    }
-
-    private static final class Keyspace
-    {
-        final long timestamp;
-        final String name;
-        final KeyspaceParams params;
-        final Collection<Table> tables;
-        final Collection<Type> types;
-        final Collection<Function> functions;
-        final Collection<Aggregate> aggregates;
-
-        Keyspace(long timestamp,
-                 String name,
-                 KeyspaceParams params,
-                 Collection<Table> tables,
-                 Collection<Type> types,
-                 Collection<Function> functions,
-                 Collection<Aggregate> aggregates)
-        {
-            this.timestamp = timestamp;
-            this.name = name;
-            this.params = params;
-            this.tables = tables;
-            this.types = types;
-            this.functions = functions;
-            this.aggregates = aggregates;
-        }
-    }
-
-    private static final class Table
-    {
-        final long timestamp;
-        final CFMetaData metadata;
-
-        Table(long timestamp, CFMetaData metadata)
-        {
-            this.timestamp = timestamp;
-            this.metadata = metadata;
-        }
-    }
-
-    private static final class Type
-    {
-        final long timestamp;
-        final UserType metadata;
-
-        Type(long timestamp, UserType metadata)
-        {
-            this.timestamp = timestamp;
-            this.metadata = metadata;
-        }
-    }
-
-    private static final class Function
-    {
-        final long timestamp;
-        final UDFunction metadata;
-
-        Function(long timestamp, UDFunction metadata)
-        {
-            this.timestamp = timestamp;
-            this.metadata = metadata;
-        }
-    }
-
-    private static final class Aggregate
-    {
-        final long timestamp;
-        final UDAggregate metadata;
-
-        Aggregate(long timestamp, UDAggregate metadata)
-        {
-            this.timestamp = timestamp;
-            this.metadata = metadata;
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/service/AbstractReadExecutor.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/AbstractReadExecutor.java b/src/java/org/apache/cassandra/service/AbstractReadExecutor.java
index 7aa926e..8944b7c 100644
--- a/src/java/org/apache/cassandra/service/AbstractReadExecutor.java
+++ b/src/java/org/apache/cassandra/service/AbstractReadExecutor.java
@@ -106,7 +106,7 @@ public abstract class AbstractReadExecutor
             if (traceState != null)
                 traceState.trace("reading {} from {}", readCommand.isDigestQuery() ? "digest" : "data", endpoint);
             logger.trace("reading {} from {}", readCommand.isDigestQuery() ? "digest" : "data", endpoint);
-            MessageOut<ReadCommand> message = readCommand.createMessage(MessagingService.instance().getVersion(endpoint));
+            MessageOut<ReadCommand> message = readCommand.createMessage();
             MessagingService.instance().sendRRWithFailure(message, endpoint, handler);
         }
 
@@ -291,8 +291,7 @@ public abstract class AbstractReadExecutor
                 if (traceState != null)
                     traceState.trace("speculating read retry on {}", extraReplica);
                 logger.trace("speculating read retry on {}", extraReplica);
-                int version = MessagingService.instance().getVersion(extraReplica);
-                MessagingService.instance().sendRRWithFailure(retryCommand.createMessage(version), extraReplica, handler);
+                MessagingService.instance().sendRRWithFailure(retryCommand.createMessage(), extraReplica, handler);
                 speculated = true;
 
                 cfs.metric.speculativeRetries.inc();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/service/CacheService.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/CacheService.java b/src/java/org/apache/cassandra/service/CacheService.java
index a4e18c0..54fa7e2 100644
--- a/src/java/org/apache/cassandra/service/CacheService.java
+++ b/src/java/org/apache/cassandra/service/CacheService.java
@@ -454,10 +454,6 @@ public class CacheService implements CacheServiceMBean
     {
         public void serialize(KeyCacheKey key, DataOutputPlus out, ColumnFamilyStore cfs) throws IOException
         {
-            // Don't serialize old-format entries, since for simplicity we didn't implement serialization for both formats
-            // https://issues.apache.org/jira/browse/CASSANDRA-10778
-            if (!key.desc.version.storeRows()) return;
-
             RowIndexEntry entry = CacheService.instance.keyCache.getInternal(key);
             if (entry == null)
                 return;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/service/CassandraDaemon.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/CassandraDaemon.java b/src/java/org/apache/cassandra/service/CassandraDaemon.java
index 5a97dfe..b41cc00 100644
--- a/src/java/org/apache/cassandra/service/CassandraDaemon.java
+++ b/src/java/org/apache/cassandra/service/CassandraDaemon.java
@@ -46,7 +46,6 @@ import com.google.common.util.concurrent.Uninterruptibles;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.batchlog.LegacyBatchlogMigrator;
 import org.apache.cassandra.concurrent.ScheduledExecutors;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
@@ -59,14 +58,12 @@ import org.apache.cassandra.db.commitlog.CommitLog;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.StartupException;
 import org.apache.cassandra.gms.Gossiper;
-import org.apache.cassandra.hints.LegacyHintsMigrator;
 import org.apache.cassandra.io.FSError;
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.metrics.CassandraMetricsRegistry;
 import org.apache.cassandra.metrics.DefaultNameFactory;
 import org.apache.cassandra.metrics.StorageMetrics;
-import org.apache.cassandra.schema.LegacySchemaMigrator;
 import org.apache.cassandra.thrift.ThriftServer;
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.utils.*;
@@ -205,18 +202,6 @@ public class CassandraDaemon
             exitOrFail(e.returnCode, e.getMessage(), e.getCause());
         }
 
-        try
-        {
-            if (SystemKeyspace.snapshotOnVersionChange())
-            {
-                SystemKeyspace.migrateDataDirs();
-            }
-        }
-        catch (IOException e)
-        {
-            exitOrFail(3, e.getMessage(), e.getCause());
-        }
-
         // We need to persist this as soon as possible after startup checks.
         // This should be the first write to SystemKeyspace (CASSANDRA-11742)
         SystemKeyspace.persistLocalMetadata();
@@ -249,13 +234,6 @@ public class CassandraDaemon
             }
         });
 
-        /*
-         * Migrate pre-3.0 keyspaces, tables, types, functions, and aggregates, to their new 3.0 storage.
-         * We don't (and can't) wait for commit log replay here, but we don't need to - all schema changes force
-         * explicit memtable flushes.
-         */
-        LegacySchemaMigrator.migrate();
-
         // Populate token metadata before flushing, for token-aware sstable partitioning (#6696)
         StorageService.instance.populateTokenMetadata();
 
@@ -333,12 +311,6 @@ public class CassandraDaemon
         // Re-populate token metadata after commit log recover (new peers might be loaded onto system keyspace #10293)
         StorageService.instance.populateTokenMetadata();
 
-        // migrate any legacy (pre-3.0) hints from system.hints table into the new store
-        new LegacyHintsMigrator(DatabaseDescriptor.getHintsDirectory(), DatabaseDescriptor.getMaxHintsFileSize()).migrate();
-
-        // migrate any legacy (pre-3.0) batch entries from system.batchlog to system.batches (new table format)
-        LegacyBatchlogMigrator.migrate();
-
         // enable auto compaction
         for (Keyspace keyspace : Keyspace.all())
         {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/service/DataResolver.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/DataResolver.java b/src/java/org/apache/cassandra/service/DataResolver.java
index be8eca1..48ad2c6 100644
--- a/src/java/org/apache/cassandra/service/DataResolver.java
+++ b/src/java/org/apache/cassandra/service/DataResolver.java
@@ -512,7 +512,7 @@ public class DataResolver extends ResponseResolver
                 if (StorageProxy.canDoLocalRequest(source))
                       StageManager.getStage(Stage.READ).maybeExecuteImmediately(new StorageProxy.LocalReadRunnable(retryCommand, handler));
                 else
-                    MessagingService.instance().sendRRWithFailure(retryCommand.createMessage(MessagingService.current_version), source, handler);
+                    MessagingService.instance().sendRRWithFailure(retryCommand.createMessage(), source, handler);
 
                 // We don't call handler.get() because we want to preserve tombstones since we're still in the middle of merging node results.
                 handler.awaitResults();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/service/ReadCallback.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/ReadCallback.java b/src/java/org/apache/cassandra/service/ReadCallback.java
index 11c0b12..6e0fadb 100644
--- a/src/java/org/apache/cassandra/service/ReadCallback.java
+++ b/src/java/org/apache/cassandra/service/ReadCallback.java
@@ -247,10 +247,7 @@ public class ReadCallback implements IAsyncCallbackWithFailure<ReadResponse>
                 AsyncRepairCallback repairHandler = new AsyncRepairCallback(repairResolver, endpoints.size());
 
                 for (InetAddress endpoint : endpoints)
-                {
-                    MessageOut<ReadCommand> message = command.createMessage(MessagingService.instance().getVersion(endpoint));
-                    MessagingService.instance().sendRR(message, endpoint, repairHandler);
-                }
+                    MessagingService.instance().sendRR(command.createMessage(), endpoint, repairHandler);
             }
         }
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/service/StartupChecks.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/StartupChecks.java b/src/java/org/apache/cassandra/service/StartupChecks.java
index 83971dd..75f7788 100644
--- a/src/java/org/apache/cassandra/service/StartupChecks.java
+++ b/src/java/org/apache/cassandra/service/StartupChecks.java
@@ -259,14 +259,15 @@ public class StartupChecks
 
             FileVisitor<Path> sstableVisitor = new SimpleFileVisitor<Path>()
             {
-                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs)
+                public FileVisitResult visitFile(Path path, BasicFileAttributes attrs)
                 {
-                    if (!Descriptor.isValidFile(file.getFileName().toString()))
+                    File file = path.toFile();
+                    if (!Descriptor.isValidFile(file))
                         return FileVisitResult.CONTINUE;
 
                     try
                     {
-                        if (!Descriptor.fromFilename(file.toString()).isCompatible())
+                        if (!Descriptor.fromFilename(file).isCompatible())
                             invalid.add(file.toString());
                     }
                     catch (Exception e)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/service/StorageProxy.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/StorageProxy.java b/src/java/org/apache/cassandra/service/StorageProxy.java
index e0be68c..77862d6 100644
--- a/src/java/org/apache/cassandra/service/StorageProxy.java
+++ b/src/java/org/apache/cassandra/service/StorageProxy.java
@@ -42,7 +42,6 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.batchlog.Batch;
 import org.apache.cassandra.batchlog.BatchlogManager;
-import org.apache.cassandra.batchlog.LegacyBatchlogMigrator;
 import org.apache.cassandra.concurrent.Stage;
 import org.apache.cassandra.concurrent.StageManager;
 import org.apache.cassandra.config.CFMetaData;
@@ -909,10 +908,10 @@ public class StorageProxy implements StorageProxyMBean
                     batchConsistencyLevel = consistency_level;
             }
 
-            final BatchlogEndpoints batchlogEndpoints = getBatchlogEndpoints(localDataCenter, batchConsistencyLevel);
+            final Collection<InetAddress> batchlogEndpoints = getBatchlogEndpoints(localDataCenter, batchConsistencyLevel);
             final UUID batchUUID = UUIDGen.getTimeUUID();
             BatchlogResponseHandler.BatchlogCleanup cleanup = new BatchlogResponseHandler.BatchlogCleanup(mutations.size(),
-                                                                                                          () -> asyncRemoveFromBatchlog(batchlogEndpoints, batchUUID, queryStartNanoTime));
+                                                                                                          () -> asyncRemoveFromBatchlog(batchlogEndpoints, batchUUID));
 
             // add a handler for each mutation - includes checking availability, but doesn't initiate any writes, yet
             for (Mutation mutation : mutations)
@@ -969,33 +968,19 @@ public class StorageProxy implements StorageProxyMBean
         return replica.equals(FBUtilities.getBroadcastAddress());
     }
 
-    private static void syncWriteToBatchlog(Collection<Mutation> mutations, BatchlogEndpoints endpoints, UUID uuid, long queryStartNanoTime)
+    private static void syncWriteToBatchlog(Collection<Mutation> mutations, Collection<InetAddress> endpoints, UUID uuid, long queryStartNanoTime)
     throws WriteTimeoutException, WriteFailureException
     {
-        WriteResponseHandler<?> handler = new WriteResponseHandler<>(endpoints.all,
+        WriteResponseHandler<?> handler = new WriteResponseHandler<>(endpoints,
                                                                      Collections.<InetAddress>emptyList(),
-                                                                     endpoints.all.size() == 1 ? ConsistencyLevel.ONE : ConsistencyLevel.TWO,
+                                                                     endpoints.size() == 1 ? ConsistencyLevel.ONE : ConsistencyLevel.TWO,
                                                                      Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME),
                                                                      null,
                                                                      WriteType.BATCH_LOG,
                                                                      queryStartNanoTime);
 
         Batch batch = Batch.createLocal(uuid, FBUtilities.timestampMicros(), mutations);
-
-        if (!endpoints.current.isEmpty())
-            syncWriteToBatchlog(handler, batch, endpoints.current);
-
-        if (!endpoints.legacy.isEmpty())
-            LegacyBatchlogMigrator.syncWriteToBatchlog(handler, batch, endpoints.legacy);
-
-        handler.get();
-    }
-
-    private static void syncWriteToBatchlog(WriteResponseHandler<?> handler, Batch batch, Collection<InetAddress> endpoints)
-    throws WriteTimeoutException, WriteFailureException
-    {
         MessageOut<Batch> message = new MessageOut<>(MessagingService.Verb.BATCH_STORE, batch, Batch.serializer);
-
         for (InetAddress target : endpoints)
         {
             logger.trace("Sending batchlog store request {} to {} for {} mutations", batch.id, target, batch.size());
@@ -1005,15 +990,7 @@ public class StorageProxy implements StorageProxyMBean
             else
                 MessagingService.instance().sendRR(message, target, handler);
         }
-    }
-
-    private static void asyncRemoveFromBatchlog(BatchlogEndpoints endpoints, UUID uuid, long queryStartNanoTime)
-    {
-        if (!endpoints.current.isEmpty())
-            asyncRemoveFromBatchlog(endpoints.current, uuid);
-
-        if (!endpoints.legacy.isEmpty())
-            LegacyBatchlogMigrator.asyncRemoveFromBatchlog(endpoints.legacy, uuid, queryStartNanoTime);
+        handler.get();
     }
 
     private static void asyncRemoveFromBatchlog(Collection<InetAddress> endpoints, UUID uuid)
@@ -1160,38 +1137,13 @@ public class StorageProxy implements StorageProxyMBean
     }
 
     /*
-     * A class that splits batchlog endpoints into legacy (version < 3.0) and current endpoints.
-     */
-    private static final class BatchlogEndpoints
-    {
-        public final Collection<InetAddress> all;
-        public final Collection<InetAddress> current;
-        public final Collection<InetAddress> legacy;
-
-        BatchlogEndpoints(Collection<InetAddress> endpoints)
-        {
-            all = endpoints;
-            current = new ArrayList<>(2);
-            legacy = new ArrayList<>(2);
-
-            for (InetAddress ep : endpoints)
-            {
-                if (MessagingService.instance().getVersion(ep) >= MessagingService.VERSION_30)
-                    current.add(ep);
-                else
-                    legacy.add(ep);
-            }
-        }
-    }
-
-    /*
      * Replicas are picked manually:
      * - replicas should be alive according to the failure detector
      * - replicas should be in the local datacenter
      * - choose min(2, number of qualifying candidates above)
      * - allow the local node to be the only replica only if it's a single-node DC
      */
-    private static BatchlogEndpoints getBatchlogEndpoints(String localDataCenter, ConsistencyLevel consistencyLevel)
+    private static Collection<InetAddress> getBatchlogEndpoints(String localDataCenter, ConsistencyLevel consistencyLevel)
     throws UnavailableException
     {
         TokenMetadata.Topology topology = StorageService.instance.getTokenMetadata().cachedOnlyTokenMap().getTopology();
@@ -1202,12 +1154,12 @@ public class StorageProxy implements StorageProxyMBean
         if (chosenEndpoints.isEmpty())
         {
             if (consistencyLevel == ConsistencyLevel.ANY)
-                return new BatchlogEndpoints(Collections.singleton(FBUtilities.getBroadcastAddress()));
+                return Collections.singleton(FBUtilities.getBroadcastAddress());
 
             throw new UnavailableException(ConsistencyLevel.ONE, 1, 0);
         }
 
-        return new BatchlogEndpoints(chosenEndpoints);
+        return chosenEndpoints;
     }
 
     /**
@@ -1816,9 +1768,8 @@ public class StorageProxy implements StorageProxyMBean
 
                 for (InetAddress endpoint : executor.getContactedReplicas())
                 {
-                    MessageOut<ReadCommand> message = command.createMessage(MessagingService.instance().getVersion(endpoint));
                     Tracing.trace("Enqueuing full data read to {}", endpoint);
-                    MessagingService.instance().sendRRWithFailure(message, endpoint, repairHandler);
+                    MessagingService.instance().sendRRWithFailure(command.createMessage(), endpoint, repairHandler);
                 }
             }
         }
@@ -2218,9 +2169,8 @@ public class StorageProxy implements StorageProxyMBean
             {
                 for (InetAddress endpoint : toQuery.filteredEndpoints)
                 {
-                    MessageOut<ReadCommand> message = rangeCommand.createMessage(MessagingService.instance().getVersion(endpoint));
                     Tracing.trace("Enqueuing request to {}", endpoint);
-                    MessagingService.instance().sendRRWithFailure(message, endpoint, handler);
+                    MessagingService.instance().sendRRWithFailure(rangeCommand.createMessage(), endpoint, handler);
                 }
             }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/service/StorageService.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/StorageService.java b/src/java/org/apache/cassandra/service/StorageService.java
index 07eb1d8..62efed2 100644
--- a/src/java/org/apache/cassandra/service/StorageService.java
+++ b/src/java/org/apache/cassandra/service/StorageService.java
@@ -257,12 +257,14 @@ public class StorageService extends NotificationBroadcasterSupport implements IE
 
         legacyProgressSupport = new LegacyJMXProgressSupport(this, jmxObjectName);
 
+        ReadCommandVerbHandler readHandler = new ReadCommandVerbHandler();
+
         /* register the verb handlers */
         MessagingService.instance().registerVerbHandlers(MessagingService.Verb.MUTATION, new MutationVerbHandler());
         MessagingService.instance().registerVerbHandlers(MessagingService.Verb.READ_REPAIR, new ReadRepairVerbHandler());
-        MessagingService.instance().registerVerbHandlers(MessagingService.Verb.READ, new ReadCommandVerbHandler());
-        MessagingService.instance().registerVerbHandlers(MessagingService.Verb.RANGE_SLICE, new RangeSliceVerbHandler());
-        MessagingService.instance().registerVerbHandlers(MessagingService.Verb.PAGED_RANGE, new RangeSliceVerbHandler());
+        MessagingService.instance().registerVerbHandlers(MessagingService.Verb.READ, readHandler);
+        MessagingService.instance().registerVerbHandlers(MessagingService.Verb.RANGE_SLICE, readHandler);
+        MessagingService.instance().registerVerbHandlers(MessagingService.Verb.PAGED_RANGE, readHandler);
         MessagingService.instance().registerVerbHandlers(MessagingService.Verb.COUNTER_MUTATION, new CounterMutationVerbHandler());
         MessagingService.instance().registerVerbHandlers(MessagingService.Verb.TRUNCATE, new TruncateVerbHandler());
         MessagingService.instance().registerVerbHandlers(MessagingService.Verb.PAXOS_PREPARE, new PrepareVerbHandler());
@@ -2082,8 +2084,7 @@ public class StorageService extends NotificationBroadcasterSupport implements IE
 
     public boolean isRpcReady(InetAddress endpoint)
     {
-        return MessagingService.instance().getVersion(endpoint) < MessagingService.VERSION_22 ||
-                Gossiper.instance.getEndpointStateForEndpoint(endpoint).isRpcReady();
+        return Gossiper.instance.getEndpointStateForEndpoint(endpoint).isRpcReady();
     }
 
     public void setRpcReady(boolean value)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/service/paxos/Commit.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/paxos/Commit.java b/src/java/org/apache/cassandra/service/paxos/Commit.java
index af94869..3b0364c 100644
--- a/src/java/org/apache/cassandra/service/paxos/Commit.java
+++ b/src/java/org/apache/cassandra/service/paxos/Commit.java
@@ -113,32 +113,20 @@ public class Commit
     {
         public void serialize(Commit commit, DataOutputPlus out, int version) throws IOException
         {
-            if (version < MessagingService.VERSION_30)
-                ByteBufferUtil.writeWithShortLength(commit.update.partitionKey().getKey(), out);
-
             UUIDSerializer.serializer.serialize(commit.ballot, out, version);
             PartitionUpdate.serializer.serialize(commit.update, out, version);
         }
 
         public Commit deserialize(DataInputPlus in, int version) throws IOException
         {
-            ByteBuffer key = null;
-            if (version < MessagingService.VERSION_30)
-                key = ByteBufferUtil.readWithShortLength(in);
-
             UUID ballot = UUIDSerializer.serializer.deserialize(in, version);
-            PartitionUpdate update = PartitionUpdate.serializer.deserialize(in, version, SerializationHelper.Flag.LOCAL, key);
+            PartitionUpdate update = PartitionUpdate.serializer.deserialize(in, version, SerializationHelper.Flag.LOCAL);
             return new Commit(ballot, update);
         }
 
         public long serializedSize(Commit commit, int version)
         {
-            int size = 0;
-            if (version < MessagingService.VERSION_30)
-                size += ByteBufferUtil.serializedSizeWithShortLength(commit.update.partitionKey().getKey());
-
-            return size
-                 + UUIDSerializer.serializer.serializedSize(commit.ballot, version)
+            return UUIDSerializer.serializer.serializedSize(commit.ballot, version)
                  + PartitionUpdate.serializer.serializedSize(commit.update, version);
         }
     }

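With the pre-3.0 key prefix removed, a Commit on the wire is just the ballot followed by the partition update, and serializedSize() reduces to the sum of those two parts. A small self-contained sketch of the invariant worth preserving when simplifying a serializer this way (stand-in payload, not the real Commit or PartitionUpdate):

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.UUID;

    public class SerializedSizeCheck
    {
        // Stand-in for Commit: a ballot plus an opaque update payload.
        static void serialize(UUID ballot, byte[] update, DataOutputStream out) throws IOException
        {
            out.writeLong(ballot.getMostSignificantBits());
            out.writeLong(ballot.getLeastSignificantBits());
            out.writeInt(update.length);
            out.write(update);
        }

        static long serializedSize(UUID ballot, byte[] update)
        {
            return 8 + 8 + 4 + update.length; // ballot (two longs) + length prefix + payload
        }

        public static void main(String[] args) throws IOException
        {
            UUID ballot = UUID.randomUUID();
            byte[] update = { 1, 2, 3 };
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            serialize(ballot, update, new DataOutputStream(bytes));
            System.out.println(bytes.size() == serializedSize(ballot, update)); // true
        }
    }
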
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/service/paxos/PrepareResponse.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/paxos/PrepareResponse.java b/src/java/org/apache/cassandra/service/paxos/PrepareResponse.java
index f843b8d..d8699c8 100644
--- a/src/java/org/apache/cassandra/service/paxos/PrepareResponse.java
+++ b/src/java/org/apache/cassandra/service/paxos/PrepareResponse.java
@@ -69,51 +69,22 @@ public class PrepareResponse
         {
             out.writeBoolean(response.promised);
             Commit.serializer.serialize(response.inProgressCommit, out, version);
-
-            if (version < MessagingService.VERSION_30)
-            {
-                UUIDSerializer.serializer.serialize(response.mostRecentCommit.ballot, out, version);
-                PartitionUpdate.serializer.serialize(response.mostRecentCommit.update, out, version);
-            }
-            else
-            {
-                Commit.serializer.serialize(response.mostRecentCommit, out, version);
-            }
+            Commit.serializer.serialize(response.mostRecentCommit, out, version);
         }
 
         public PrepareResponse deserialize(DataInputPlus in, int version) throws IOException
         {
             boolean success = in.readBoolean();
             Commit inProgress = Commit.serializer.deserialize(in, version);
-            Commit mostRecent;
-            if (version < MessagingService.VERSION_30)
-            {
-                UUID ballot = UUIDSerializer.serializer.deserialize(in, version);
-                PartitionUpdate update = PartitionUpdate.serializer.deserialize(in, version, SerializationHelper.Flag.LOCAL, inProgress.update.partitionKey());
-                mostRecent = new Commit(ballot, update);
-            }
-            else
-            {
-                mostRecent = Commit.serializer.deserialize(in, version);
-            }
+            Commit mostRecent = Commit.serializer.deserialize(in, version);
             return new PrepareResponse(success, inProgress, mostRecent);
         }
 
         public long serializedSize(PrepareResponse response, int version)
         {
-            long size = TypeSizes.sizeof(response.promised)
-                      + Commit.serializer.serializedSize(response.inProgressCommit, version);
-
-            if (version < MessagingService.VERSION_30)
-            {
-                size += UUIDSerializer.serializer.serializedSize(response.mostRecentCommit.ballot, version);
-                size += PartitionUpdate.serializer.serializedSize(response.mostRecentCommit.update, version);
-            }
-            else
-            {
-                size += Commit.serializer.serializedSize(response.mostRecentCommit, version);
-            }
-            return size;
+            return TypeSizes.sizeof(response.promised)
+                 + Commit.serializer.serializedSize(response.inProgressCommit, version)
+                 + Commit.serializer.serializedSize(response.mostRecentCommit, version);
         }
     }
 }

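PrepareResponse now sends mostRecentCommit through Commit.serializer on every path, the same as inProgressCommit, so the whole message round-trips through a single code path. A toy round-trip sketch with string stand-ins for the two commits:

    import java.io.*;

    public class RoundTripCheck
    {
        // Stand-ins for the two commits; the real code delegates both to Commit.serializer.
        static void serialize(boolean promised, String inProgress, String mostRecent, DataOutputStream out) throws IOException
        {
            out.writeBoolean(promised);
            out.writeUTF(inProgress);
            out.writeUTF(mostRecent); // same code path as inProgress now
        }

        public static void main(String[] args) throws IOException
        {
            ByteArrayOutputStream buf = new ByteArrayOutputStream();
            serialize(true, "commit-1", "commit-2", new DataOutputStream(buf));

            DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
            System.out.println(in.readBoolean()); // true
            System.out.println(in.readUTF());     // commit-1
            System.out.println(in.readUTF());     // commit-2
        }
    }
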
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/streaming/StreamReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/streaming/StreamReader.java b/src/java/org/apache/cassandra/streaming/StreamReader.java
index 6465bf7..fab9372 100644
--- a/src/java/org/apache/cassandra/streaming/StreamReader.java
+++ b/src/java/org/apache/cassandra/streaming/StreamReader.java
@@ -196,16 +196,7 @@ public class StreamReader
                                   long totalSize, UUID sessionId) throws IOException
         {
             this.metadata = metadata;
-            // streaming pre-3.0 sstables require mark/reset support from source stream
-            if (version.correspondingMessagingVersion() < MessagingService.VERSION_30)
-            {
-                logger.trace("Initializing rewindable input stream for reading legacy sstable with {} bytes with following " +
-                             "parameters: initial_mem_buffer_size={}, max_mem_buffer_size={}, max_spill_file_size={}.",
-                             totalSize, INITIAL_MEM_BUFFER_SIZE, MAX_MEM_BUFFER_SIZE, MAX_SPILL_FILE_SIZE);
-                File bufferFile = getTempBufferFile(metadata, totalSize, sessionId);
-                this.in = new RewindableDataInputStreamPlus(in, INITIAL_MEM_BUFFER_SIZE, MAX_MEM_BUFFER_SIZE, bufferFile, MAX_SPILL_FILE_SIZE);
-            } else
-                this.in = new DataInputPlus.DataInputStreamPlus(in);
+            this.in = new DataInputPlus.DataInputStreamPlus(in);
             this.helper = new SerializationHelper(metadata, version.correspondingMessagingVersion(), SerializationHelper.Flag.PRESERVE_SIZE);
             this.header = header;
         }

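The deleted branch existed because, as its comment said, streaming pre-3.0 sstables required mark/reset support from the source stream, which a raw network stream does not offer. For reference, this is roughly the facility the removed RewindableDataInputStreamPlus provided, shown with the JDK's in-memory equivalent (the real class also spilled to a temp file to bound memory):

    import java.io.BufferedInputStream;
    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class MarkResetDemo
    {
        public static void main(String[] args) throws IOException
        {
            InputStream raw = new ByteArrayInputStream(new byte[]{ 10, 20, 30, 40 });

            // BufferedInputStream supports mark/reset up to its readlimit;
            // the removed code spilled to disk instead, to keep memory bounded.
            BufferedInputStream in = new BufferedInputStream(raw);
            in.mark(1024);                 // remember this position
            int first = in.read();         // first pass
            in.reset();                    // rewind for the second pass
            int again = in.read();
            System.out.println(first == again); // true
        }
    }
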
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/streaming/compress/CompressedStreamReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/streaming/compress/CompressedStreamReader.java b/src/java/org/apache/cassandra/streaming/compress/CompressedStreamReader.java
index 70b5765..2044d4d 100644
--- a/src/java/org/apache/cassandra/streaming/compress/CompressedStreamReader.java
+++ b/src/java/org/apache/cassandra/streaming/compress/CompressedStreamReader.java
@@ -35,6 +35,7 @@ import org.apache.cassandra.streaming.ProgressInfo;
 import org.apache.cassandra.streaming.StreamReader;
 import org.apache.cassandra.streaming.StreamSession;
 import org.apache.cassandra.streaming.messages.FileMessageHeader;
+import org.apache.cassandra.utils.ChecksumType;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.Pair;
 
@@ -81,7 +82,7 @@ public class CompressedStreamReader extends StreamReader
                      cfs.getColumnFamilyName());
 
         CompressedInputStream cis = new CompressedInputStream(Channels.newInputStream(channel), compressionInfo,
-                                                              inputVersion.compressedChecksumType(), cfs::getCrcCheckChance);
+                                                              ChecksumType.CRC32, cfs::getCrcCheckChance);
         TrackedInputStream in = new TrackedInputStream(cis);
 
         StreamDeserializer deserializer = new StreamDeserializer(cfs.metadata, in, inputVersion, getHeader(cfs.metadata),

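Since "ma" is now the oldest supported format and everything from "ma" on checksums with CRC32 (the Adler32 window covered only the removed 2.x versions), the reader can hard-code ChecksumType.CRC32 instead of asking the sstable version. A quick sketch of chunk checksumming with the JDK CRC32 that the enum value wraps:

    import java.nio.charset.StandardCharsets;
    import java.util.zip.CRC32;

    public class Crc32Demo
    {
        public static void main(String[] args)
        {
            byte[] chunk = "compressed-chunk-bytes".getBytes(StandardCharsets.UTF_8);

            CRC32 crc = new CRC32();
            crc.update(chunk, 0, chunk.length);
            long stored = crc.getValue();

            // On the read side, recompute and compare against the stored value.
            CRC32 check = new CRC32();
            check.update(chunk, 0, chunk.length);
            System.out.println(check.getValue() == stored); // true
        }
    }
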
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/streaming/messages/FileMessageHeader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/streaming/messages/FileMessageHeader.java b/src/java/org/apache/cassandra/streaming/messages/FileMessageHeader.java
index 232727d..b0639ea 100644
--- a/src/java/org/apache/cassandra/streaming/messages/FileMessageHeader.java
+++ b/src/java/org/apache/cassandra/streaming/messages/FileMessageHeader.java
@@ -189,13 +189,7 @@ public class FileMessageHeader
             UUIDSerializer.serializer.serialize(header.cfId, out, version);
             out.writeInt(header.sequenceNumber);
             out.writeUTF(header.version.toString());
-
-            //We can't stream to a node that doesn't understand a new sstable format
-            if (version < StreamMessage.VERSION_22 && header.format != SSTableFormat.Type.LEGACY && header.format != SSTableFormat.Type.BIG)
-                throw new UnsupportedOperationException("Can't stream non-legacy sstables to nodes < 2.2");
-
-            if (version >= StreamMessage.VERSION_22)
-                out.writeUTF(header.format.name);
+            out.writeUTF(header.format.name);
 
             out.writeLong(header.estimatedKeys);
             out.writeInt(header.sections.size());
@@ -212,8 +206,7 @@ public class FileMessageHeader
             out.writeLong(header.repairedAt);
             out.writeInt(header.sstableLevel);
 
-            if (version >= StreamMessage.VERSION_30 && header.version.storeRows())
-                SerializationHeader.serializer.serialize(header.version, header.header, out);
+            SerializationHeader.serializer.serialize(header.version, header.header, out);
             return compressionInfo;
         }
 
@@ -222,10 +215,7 @@ public class FileMessageHeader
             UUID cfId = UUIDSerializer.serializer.deserialize(in, MessagingService.current_version);
             int sequenceNumber = in.readInt();
             Version sstableVersion = SSTableFormat.Type.current().info.getVersion(in.readUTF());
-
-            SSTableFormat.Type format = SSTableFormat.Type.LEGACY;
-            if (version >= StreamMessage.VERSION_22)
-                format = SSTableFormat.Type.validate(in.readUTF());
+            SSTableFormat.Type format = SSTableFormat.Type.validate(in.readUTF());
 
             long estimatedKeys = in.readLong();
             int count = in.readInt();
@@ -235,9 +225,7 @@ public class FileMessageHeader
             CompressionInfo compressionInfo = CompressionInfo.serializer.deserialize(in, MessagingService.current_version);
             long repairedAt = in.readLong();
             int sstableLevel = in.readInt();
-            SerializationHeader.Component header = version >= StreamMessage.VERSION_30 && sstableVersion.storeRows()
-                                                 ? SerializationHeader.serializer.deserialize(sstableVersion, in)
-                                                 : null;
+            SerializationHeader.Component header = SerializationHeader.serializer.deserialize(sstableVersion, in);
 
             return new FileMessageHeader(cfId, sequenceNumber, sstableVersion, format, estimatedKeys, sections, compressionInfo, repairedAt, sstableLevel, header);
         }
@@ -247,10 +235,7 @@ public class FileMessageHeader
             long size = UUIDSerializer.serializer.serializedSize(header.cfId, version);
             size += TypeSizes.sizeof(header.sequenceNumber);
             size += TypeSizes.sizeof(header.version.toString());
-
-            if (version >= StreamMessage.VERSION_22)
-                size += TypeSizes.sizeof(header.format.name);
-
+            size += TypeSizes.sizeof(header.format.name);
             size += TypeSizes.sizeof(header.estimatedKeys);
 
             size += TypeSizes.sizeof(header.sections.size());

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/streaming/messages/StreamMessage.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/streaming/messages/StreamMessage.java b/src/java/org/apache/cassandra/streaming/messages/StreamMessage.java
index 7487aaf..3ce1958 100644
--- a/src/java/org/apache/cassandra/streaming/messages/StreamMessage.java
+++ b/src/java/org/apache/cassandra/streaming/messages/StreamMessage.java
@@ -33,8 +33,6 @@ import org.apache.cassandra.streaming.StreamSession;
 public abstract class StreamMessage
 {
     /** Streaming protocol version */
-    public static final int VERSION_20 = 2;
-    public static final int VERSION_22 = 3;
     public static final int VERSION_30 = 4;
     public static final int CURRENT_VERSION = VERSION_30;
 

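With VERSION_20 and VERSION_22 gone, VERSION_30 (wire value 4) is the only streaming protocol version a 4.0 node speaks, so older peers can be rejected outright at session setup. A hypothetical guard illustrating the effect; checkPeerVersion is invented for illustration and is not part of the commit:

    public class StreamVersionGuard
    {
        public static final int VERSION_30 = 4;
        public static final int CURRENT_VERSION = VERSION_30;

        // Hypothetical handshake check: a 4.0 node only streams with 3.0+ peers.
        static void checkPeerVersion(int peerVersion)
        {
            if (peerVersion < VERSION_30)
                throw new IllegalStateException("Unsupported streaming version: " + peerVersion);
        }

        public static void main(String[] args)
        {
            checkPeerVersion(CURRENT_VERSION); // ok
            try { checkPeerVersion(3); }
            catch (IllegalStateException e) { System.out.println(e.getMessage()); }
        }
    }
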
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/tools/SSTableExport.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/tools/SSTableExport.java b/src/java/org/apache/cassandra/tools/SSTableExport.java
index 070434d..52d5ecf 100644
--- a/src/java/org/apache/cassandra/tools/SSTableExport.java
+++ b/src/java/org/apache/cassandra/tools/SSTableExport.java
@@ -93,8 +93,8 @@ public class SSTableExport
      */
     public static CFMetaData metadataFromSSTable(Descriptor desc) throws IOException
     {
-        if (!desc.version.storeRows())
-            throw new IOException("pre-3.0 SSTable is not supported.");
+        if (!desc.version.isCompatible())
+            throw new IOException("Cannot process old and unsupported SSTable version.");
 
         EnumSet<MetadataType> types = EnumSet.of(MetadataType.STATS, MetadataType.HEADER);
         Map<MetadataType, MetadataComponent> sstableMetadata = desc.getMetadataSerializer().deserialize(desc, types);
@@ -162,11 +162,6 @@ public class SSTableExport
                         : cmd.getOptionValues(EXCLUDE_KEY_OPTION)));
         String ssTableFileName = new File(cmd.getArgs()[0]).getAbsolutePath();
 
-        if (Descriptor.isLegacyFile(new File(ssTableFileName)))
-        {
-            System.err.println("Unsupported legacy sstable");
-            System.exit(1);
-        }
         if (!new File(ssTableFileName).exists())
         {
             System.err.println("Cannot find file " + ssTableFileName);

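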

[07/11] cassandra git commit: Remove pre-3.0 compatibility code for 4.0

Posted by sl...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/SSTableSimpleIterator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableSimpleIterator.java b/src/java/org/apache/cassandra/io/sstable/SSTableSimpleIterator.java
index ce42126..ad0f3c9 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableSimpleIterator.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableSimpleIterator.java
@@ -54,18 +54,12 @@ public abstract class SSTableSimpleIterator extends AbstractIterator<Unfiltered>
 
     public static SSTableSimpleIterator create(CFMetaData metadata, DataInputPlus in, SerializationHeader header, SerializationHelper helper, DeletionTime partitionDeletion)
     {
-        if (helper.version < MessagingService.VERSION_30)
-            return new OldFormatIterator(metadata, in, helper, partitionDeletion);
-        else
-            return new CurrentFormatIterator(metadata, in, header, helper);
+        return new CurrentFormatIterator(metadata, in, header, helper);
     }
 
     public static SSTableSimpleIterator createTombstoneOnly(CFMetaData metadata, DataInputPlus in, SerializationHeader header, SerializationHelper helper, DeletionTime partitionDeletion)
     {
-        if (helper.version < MessagingService.VERSION_30)
-            return new OldFormatTombstoneIterator(metadata, in, helper, partitionDeletion);
-        else
-            return new CurrentFormatTombstoneIterator(metadata, in, header, helper);
+        return new CurrentFormatTombstoneIterator(metadata, in, header, helper);
     }
 
     public abstract Row readStaticRow() throws IOException;
@@ -136,106 +130,4 @@ public abstract class SSTableSimpleIterator extends AbstractIterator<Unfiltered>
             }
         }
     }
-
-    private static class OldFormatIterator extends SSTableSimpleIterator
-    {
-        private final UnfilteredDeserializer deserializer;
-
-        private OldFormatIterator(CFMetaData metadata, DataInputPlus in, SerializationHelper helper, DeletionTime partitionDeletion)
-        {
-            super(metadata, in, helper);
-            // We use an UnfilteredDeserializer because even though we don't need all its fanciness, it happens to handle all
-            // the details we need for reading the old format.
-            this.deserializer = UnfilteredDeserializer.create(metadata, in, null, helper, partitionDeletion, false);
-        }
-
-        public Row readStaticRow() throws IOException
-        {
-            if (metadata.isCompactTable())
-            {
-                // For static compact tables, in the old format, static columns are intermingled with the other columns, so we
-                // need to extract them, which implies 2 passes (one to extract the statics, then one for the other values).
-                if (metadata.isStaticCompactTable())
-                {
-                    assert in instanceof RewindableDataInput;
-                    RewindableDataInput file = (RewindableDataInput)in;
-                    DataPosition mark = file.mark();
-                    Row staticRow = LegacyLayout.extractStaticColumns(metadata, file, metadata.partitionColumns().statics);
-                    file.reset(mark);
-
-                    // We've extracted the static columns, so we must ignore them on the 2nd pass
-                    ((UnfilteredDeserializer.OldFormatDeserializer)deserializer).setSkipStatic();
-                    return staticRow;
-                }
-                else
-                {
-                    return Rows.EMPTY_STATIC_ROW;
-                }
-            }
-
-            return deserializer.hasNext() && deserializer.nextIsStatic()
-                 ? (Row)deserializer.readNext()
-                 : Rows.EMPTY_STATIC_ROW;
-
-        }
-
-        protected Unfiltered computeNext()
-        {
-            while (true)
-            {
-                try
-                {
-                    if (!deserializer.hasNext())
-                        return endOfData();
-
-                    Unfiltered unfiltered = deserializer.readNext();
-                    if (metadata.isStaticCompactTable() && unfiltered.kind() == Unfiltered.Kind.ROW)
-                    {
-                        Row row = (Row) unfiltered;
-                        ColumnDefinition def = metadata.getColumnDefinition(LegacyLayout.encodeClustering(metadata, row.clustering()));
-                        if (def != null && def.isStatic())
-                            continue;
-                    }
-                    return unfiltered;
-                }
-                catch (IOException e)
-                {
-                    throw new IOError(e);
-                }
-            }
-        }
-
-    }
-
-    private static class OldFormatTombstoneIterator extends OldFormatIterator
-    {
-        private OldFormatTombstoneIterator(CFMetaData metadata, DataInputPlus in, SerializationHelper helper, DeletionTime partitionDeletion)
-        {
-            super(metadata, in, helper, partitionDeletion);
-        }
-
-        public Row readStaticRow() throws IOException
-        {
-            Row row = super.readStaticRow();
-            if (!row.deletion().isLive())
-                return BTreeRow.emptyDeletedRow(row.clustering(), row.deletion());
-            return Rows.EMPTY_STATIC_ROW;
-        }
-
-        protected Unfiltered computeNext()
-        {
-            while (true)
-            {
-                Unfiltered unfiltered = super.computeNext();
-                if (unfiltered == null || unfiltered.isRangeTombstoneMarker())
-                    return unfiltered;
-
-                Row row = (Row) unfiltered;
-                if (!row.deletion().isLive())
-                    return BTreeRow.emptyDeletedRow(row.clustering(), row.deletion());
-                // Otherwise read next.
-            }
-        }
-
-    }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java
index 015c5bb..323b1bd 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableTxnWriter.java
@@ -148,14 +148,8 @@ public class SSTableTxnWriter extends Transactional.AbstractTransactional implem
         return new SSTableTxnWriter(txn, writer);
     }
 
-    public static SSTableTxnWriter create(ColumnFamilyStore cfs, String filename, long keyCount, long repairedAt, int sstableLevel, SerializationHeader header)
+    public static SSTableTxnWriter create(ColumnFamilyStore cfs, Descriptor desc, long keyCount, long repairedAt, SerializationHeader header)
     {
-        Descriptor desc = Descriptor.fromFilename(filename);
-        return create(cfs, desc, keyCount, repairedAt, sstableLevel, header);
-    }
-
-    public static SSTableTxnWriter create(ColumnFamilyStore cfs, String filename, long keyCount, long repairedAt, SerializationHeader header)
-    {
-        return create(cfs, filename, keyCount, repairedAt, 0, header);
+        return create(cfs, desc, keyCount, repairedAt, 0, header);
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/format/RangeAwareSSTableWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/format/RangeAwareSSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/format/RangeAwareSSTableWriter.java
index 3665da7..89c064b 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/RangeAwareSSTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/RangeAwareSSTableWriter.java
@@ -68,7 +68,7 @@ public class RangeAwareSSTableWriter implements SSTableMultiWriter
             if (localDir == null)
                 throw new IOException(String.format("Insufficient disk space to store %s",
                                                     FBUtilities.prettyPrintMemory(totalSize)));
-            Descriptor desc = Descriptor.fromFilename(cfs.getSSTablePath(cfs.getDirectories().getLocationForDisk(localDir), format));
+            Descriptor desc = cfs.newSSTableDescriptor(cfs.getDirectories().getLocationForDisk(localDir), format);
             currentWriter = cfs.createSSTableMultiWriter(desc, estimatedKeys, repairedAt, sstableLevel, header, txn);
         }
     }
@@ -90,7 +90,7 @@ public class RangeAwareSSTableWriter implements SSTableMultiWriter
             if (currentWriter != null)
                 finishedWriters.add(currentWriter);
 
-            Descriptor desc = Descriptor.fromFilename(cfs.getSSTablePath(cfs.getDirectories().getLocationForDisk(directories[currentIndex])), format);
+            Descriptor desc = cfs.newSSTableDescriptor(cfs.getDirectories().getLocationForDisk(directories[currentIndex]), format);
             currentWriter = cfs.createSSTableMultiWriter(desc, estimatedKeys, repairedAt, sstableLevel, header, txn);
         }
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/format/SSTableFormat.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableFormat.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableFormat.java
index 4391946..29e29ef 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/SSTableFormat.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableFormat.java
@@ -41,10 +41,6 @@ public interface SSTableFormat
 
     public static enum Type
     {
-        //Used internally to refer to files with no
-        //format flag in the filename
-        LEGACY("big", BigFormat.instance),
-
         //The original sstable format
         BIG("big", BigFormat.instance);
 
@@ -70,10 +66,6 @@ public interface SSTableFormat
         {
             for (Type valid : Type.values())
             {
-                //This is used internally for old sstables
-                if (valid == LEGACY)
-                    continue;
-
                 if (valid.name.equalsIgnoreCase(name))
                     return valid;
             }

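With the LEGACY constant gone, Type.validate() no longer needs to skip a sentinel while scanning values(). A minimal self-contained sketch of the simplified lookup:

    public class FormatLookup
    {
        enum Type
        {
            BIG("big");

            final String name;
            Type(String name) { this.name = name; }

            static Type validate(String name)
            {
                for (Type valid : values())     // no LEGACY sentinel to skip anymore
                    if (valid.name.equalsIgnoreCase(name))
                        return valid;
                throw new IllegalArgumentException("No Type constant " + name);
            }
        }

        public static void main(String[] args)
        {
            System.out.println(Type.validate("BIG")); // BIG
        }
    }
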
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
index 1a2e1b0..add8ddc 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
@@ -253,58 +253,48 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
     {
         long count = -1;
 
-        // check if cardinality estimator is available for all SSTables
-        boolean cardinalityAvailable = !Iterables.isEmpty(sstables) && Iterables.all(sstables, new Predicate<SSTableReader>()
-        {
-            public boolean apply(SSTableReader sstable)
-            {
-                return sstable.descriptor.version.hasNewStatsFile();
-            }
-        });
+        if (Iterables.isEmpty(sstables))
+            return count;
 
-        // if it is, load them to estimate key count
-        if (cardinalityAvailable)
+        boolean failed = false;
+        ICardinality cardinality = null;
+        for (SSTableReader sstable : sstables)
         {
-            boolean failed = false;
-            ICardinality cardinality = null;
-            for (SSTableReader sstable : sstables)
-            {
-                if (sstable.openReason == OpenReason.EARLY)
-                    continue;
-
-                try
-                {
-                    CompactionMetadata metadata = (CompactionMetadata) sstable.descriptor.getMetadataSerializer().deserialize(sstable.descriptor, MetadataType.COMPACTION);
-                    // If we can't load the CompactionMetadata, we are forced to estimate the keys using the index
-                    // summary. (CASSANDRA-10676)
-                    if (metadata == null)
-                    {
-                        logger.warn("Reading cardinality from Statistics.db failed for {}", sstable.getFilename());
-                        failed = true;
-                        break;
-                    }
+            if (sstable.openReason == OpenReason.EARLY)
+                continue;
 
-                    if (cardinality == null)
-                        cardinality = metadata.cardinalityEstimator;
-                    else
-                        cardinality = cardinality.merge(metadata.cardinalityEstimator);
-                }
-                catch (IOException e)
-                {
-                    logger.warn("Reading cardinality from Statistics.db failed.", e);
-                    failed = true;
-                    break;
-                }
-                catch (CardinalityMergeException e)
+            try
+            {
+                CompactionMetadata metadata = (CompactionMetadata) sstable.descriptor.getMetadataSerializer().deserialize(sstable.descriptor, MetadataType.COMPACTION);
+                // If we can't load the CompactionMetadata, we are forced to estimate the keys using the index
+                // summary. (CASSANDRA-10676)
+                if (metadata == null)
                 {
-                    logger.warn("Cardinality merge failed.", e);
+                    logger.warn("Reading cardinality from Statistics.db failed for {}", sstable.getFilename());
                     failed = true;
                     break;
                 }
+
+                if (cardinality == null)
+                    cardinality = metadata.cardinalityEstimator;
+                else
+                    cardinality = cardinality.merge(metadata.cardinalityEstimator);
+            }
+            catch (IOException e)
+            {
+                logger.warn("Reading cardinality from Statistics.db failed.", e);
+                failed = true;
+                break;
+            }
+            catch (CardinalityMergeException e)
+            {
+                logger.warn("Cardinality merge failed.", e);
+                failed = true;
+                break;
             }
-            if (cardinality != null && !failed)
-                count = cardinality.cardinality();
         }
+        if (cardinality != null && !failed)
+            count = cardinality.cardinality();
 
         // if something went wrong above or cardinality is not available, calculate using index summary
         if (count < 0)
@@ -481,14 +471,14 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
         assert !validate || components.contains(Component.PRIMARY_INDEX) : "Primary index component is missing for sstable " + descriptor;
 
         // For the 3.0+ sstable format, the (misnomed) stats component holds the serialization header, which we need to deserialize the sstable content
-        assert !descriptor.version.storeRows() || components.contains(Component.STATS) : "Stats component is missing for sstable " + descriptor;
+        assert components.contains(Component.STATS) : "Stats component is missing for sstable " + descriptor;
 
         EnumSet<MetadataType> types = EnumSet.of(MetadataType.VALIDATION, MetadataType.STATS, MetadataType.HEADER);
         Map<MetadataType, MetadataComponent> sstableMetadata = descriptor.getMetadataSerializer().deserialize(descriptor, types);
         ValidationMetadata validationMetadata = (ValidationMetadata) sstableMetadata.get(MetadataType.VALIDATION);
         StatsMetadata statsMetadata = (StatsMetadata) sstableMetadata.get(MetadataType.STATS);
         SerializationHeader.Component header = (SerializationHeader.Component) sstableMetadata.get(MetadataType.HEADER);
-        assert !descriptor.version.storeRows() || header != null;
+        assert header != null;
 
         // Check if sstable is created using same partitioner.
         // Partitioner can be null, which indicates older version of sstable or no stats available.
@@ -730,7 +720,7 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
         {
             // bf is enabled and fp chance matches the currently configured value.
             load(false, true);
-            loadBloomFilter(descriptor.version.hasOldBfHashOrder());
+            loadBloomFilter();
         }
     }
 
@@ -739,11 +729,11 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
      *
      * @throws IOException
      */
-    private void loadBloomFilter(boolean oldBfHashOrder) throws IOException
+    private void loadBloomFilter() throws IOException
     {
         try (DataInputStream stream = new DataInputStream(new BufferedInputStream(new FileInputStream(descriptor.filenameFor(Component.FILTER)))))
         {
-            bf = FilterFactory.deserialize(stream, true, oldBfHashOrder);
+            bf = FilterFactory.deserialize(stream, true);
         }
     }
 
@@ -829,7 +819,7 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
                     : estimateRowsFromIndex(primaryIndex); // statistics is supposed to be optional
 
             if (recreateBloomFilter)
-                bf = FilterFactory.getFilter(estimatedKeys, metadata.params.bloomFilterFpChance, true, descriptor.version.hasOldBfHashOrder());
+                bf = FilterFactory.getFilter(estimatedKeys, metadata.params.bloomFilterFpChance, true);
 
             try (IndexSummaryBuilder summaryBuilder = summaryLoaded ? null : new IndexSummaryBuilder(estimatedKeys, metadata.params.minIndexInterval, samplingLevel))
             {
@@ -883,7 +873,7 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
         {
             iStream = new DataInputStream(new FileInputStream(summariesFile));
             indexSummary = IndexSummary.serializer.deserialize(
-                    iStream, getPartitioner(), descriptor.version.hasSamplingLevel(),
+                    iStream, getPartitioner(),
                     metadata.params.minIndexInterval, metadata.params.maxIndexInterval);
             first = decorateKey(ByteBufferUtil.readWithLength(iStream));
             last = decorateKey(ByteBufferUtil.readWithLength(iStream));
@@ -932,7 +922,7 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
 
         try (DataOutputStreamPlus oStream = new BufferedDataOutputStreamPlus(new FileOutputStream(summariesFile));)
         {
-            IndexSummary.serializer.serialize(summary, oStream, descriptor.version.hasSamplingLevel());
+            IndexSummary.serializer.serialize(summary, oStream);
             ByteBufferUtil.writeWithLength(first.getKey(), oStream);
             ByteBufferUtil.writeWithLength(last.getKey(), oStream);
         }
@@ -1106,8 +1096,6 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
     @SuppressWarnings("resource")
     public SSTableReader cloneWithNewSummarySamplingLevel(ColumnFamilyStore parent, int samplingLevel) throws IOException
     {
-        assert descriptor.version.hasSamplingLevel();
-
         synchronized (tidy.global)
         {
             assert openReason != OpenReason.EARLY;

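Every supported sstable now carries a cardinality estimator in its stats component, so the up-front availability scan is gone: the reader simply merges the per-sstable estimators and falls back to the index-summary estimate if any fails to load (the CASSANDRA-10676 case). A toy sketch of that merge-with-fallback shape, using an exact set where the real code uses HyperLogLog:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class KeyCountEstimate
    {
        // Exact-set stand-in for the per-sstable HyperLogLog estimator.
        static long estimateKeys(List<Set<String>> estimators)
        {
            Set<String> merged = new HashSet<>();
            for (Set<String> estimator : estimators)
            {
                if (estimator == null)  // analogous to a missing CompactionMetadata:
                    return -1;          // caller recomputes from the index summaries
                merged.addAll(estimator);
            }
            return merged.size();
        }

        public static void main(String[] args)
        {
            List<Set<String>> sstables = Arrays.asList(
                new HashSet<>(Arrays.asList("a", "b")),
                new HashSet<>(Arrays.asList("b", "c")));
            System.out.println(estimateKeys(sstables)); // 3
        }
    }
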
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/format/SSTableWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableWriter.java
index 9fb5f7c..874c679 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/SSTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableWriter.java
@@ -127,26 +127,14 @@ public abstract class SSTableWriter extends SSTable implements Transactional
         return create(descriptor, keyCount, repairedAt, metadata, collector, header, indexes, txn);
     }
 
-    public static SSTableWriter create(String filename,
-                                       long keyCount,
-                                       long repairedAt,
-                                       int sstableLevel,
-                                       SerializationHeader header,
-                                       Collection<Index> indexes,
-                                       LifecycleTransaction txn)
-    {
-        return create(Descriptor.fromFilename(filename), keyCount, repairedAt, sstableLevel, header, indexes, txn);
-    }
-
     @VisibleForTesting
-    public static SSTableWriter create(String filename,
+    public static SSTableWriter create(Descriptor descriptor,
                                        long keyCount,
                                        long repairedAt,
                                        SerializationHeader header,
                                        Collection<Index> indexes,
                                        LifecycleTransaction txn)
     {
-        Descriptor descriptor = Descriptor.fromFilename(filename);
         return create(descriptor, keyCount, repairedAt, 0, header, indexes, txn);
     }
 
@@ -157,7 +145,7 @@ public abstract class SSTableWriter extends SSTable implements Transactional
                 Component.STATS,
                 Component.SUMMARY,
                 Component.TOC,
-                Component.digestFor(BigFormat.latestVersion.uncompressedChecksumType())));
+                Component.DIGEST));
 
         if (metadata.params.bloomFilterFpChance < 1.0)
             components.add(Component.FILTER);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/format/Version.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/format/Version.java b/src/java/org/apache/cassandra/io/sstable/format/Version.java
index 96c5a6e..b78e434 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/Version.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/Version.java
@@ -46,30 +46,8 @@ public abstract class Version
 
     public abstract boolean isLatestVersion();
 
-    public abstract boolean hasSamplingLevel();
-
-    public abstract boolean hasNewStatsFile();
-
-    public abstract ChecksumType compressedChecksumType();
-
-    public abstract ChecksumType uncompressedChecksumType();
-
-    public abstract boolean hasRepairedAt();
-
-    public abstract boolean tracksLegacyCounterShards();
-
-    public abstract boolean hasNewFileName();
-
-    public abstract boolean storeRows();
-
     public abstract int correspondingMessagingVersion(); // Only used by storage that 'storeRows' so far
 
-    public abstract boolean hasOldBfHashOrder();
-
-    public abstract boolean hasCompactionAncestors();
-
-    public abstract boolean hasBoundaries();
-
     public abstract boolean hasCommitLogLowerBound();
 
     public abstract boolean hasCommitLogIntervals();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/format/big/BigFormat.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/format/big/BigFormat.java b/src/java/org/apache/cassandra/io/sstable/format/big/BigFormat.java
index 3846194..980eed0 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/big/BigFormat.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/big/BigFormat.java
@@ -111,16 +111,8 @@ public class BigFormat implements SSTableFormat
     static class BigVersion extends Version
     {
         public static final String current_version = "mc";
-        public static final String earliest_supported_version = "jb";
+        public static final String earliest_supported_version = "ma";
 
-        // jb (2.0.1): switch from crc32 to adler32 for compression checksums
-        //             checksum the compressed data
-        // ka (2.1.0): new Statistics.db file format
-        //             index summaries can be downsampled and the sampling level is persisted
-        //             switch uncompressed checksums to adler32
-        //             tracks presence of legacy (local and remote) counter shards
-        // la (2.2.0): new file name format
-        // lb (2.2.7): commit log lower bound included
         // ma (3.0.0): swap bf hash order
         //             store rows natively
         // mb (3.0.7, 3.7): commit log lower bound included
@@ -129,62 +121,17 @@ public class BigFormat implements SSTableFormat
         // NOTE: when adding a new version, please add that to LegacySSTableTest, too.
 
         private final boolean isLatestVersion;
-        private final boolean hasSamplingLevel;
-        private final boolean newStatsFile;
-        private final ChecksumType compressedChecksumType;
-        private final ChecksumType uncompressedChecksumType;
-        private final boolean hasRepairedAt;
-        private final boolean tracksLegacyCounterShards;
-        private final boolean newFileName;
-        public final boolean storeRows;
-        public final int correspondingMessagingVersion; // Only used by storage that 'storeRows' so far
-        public final boolean hasBoundaries;
-        /**
-         * CASSANDRA-8413: the 3.0 bloom filter representation changed (two longs just swapped) so that it
-         * has no 'static' bits caused by using the same upper bits for both bloom filter and token distribution.
-         */
-        private final boolean hasOldBfHashOrder;
+        public final int correspondingMessagingVersion;
         private final boolean hasCommitLogLowerBound;
         private final boolean hasCommitLogIntervals;
 
-        /**
-         * CASSANDRA-7066: compaction ancestors are no longer used and have been removed.
-         */
-        private final boolean hasCompactionAncestors;
-
         BigVersion(String version)
         {
             super(instance, version);
 
             isLatestVersion = version.compareTo(current_version) == 0;
-            hasSamplingLevel = version.compareTo("ka") >= 0;
-            newStatsFile = version.compareTo("ka") >= 0;
-
-            //For a while Adler32 was in use, now the CRC32 intrinsic is very good especially after Haswell
-            //PureJavaCRC32 was always faster than Adler32. See CASSANDRA-8684
-            ChecksumType checksumType = ChecksumType.CRC32;
-            if (version.compareTo("ka") >= 0 && version.compareTo("ma") < 0)
-                checksumType = ChecksumType.Adler32;
-            this.uncompressedChecksumType = checksumType;
-
-            checksumType = ChecksumType.CRC32;
-            if (version.compareTo("jb") >= 0 && version.compareTo("ma") < 0)
-                checksumType = ChecksumType.Adler32;
-            this.compressedChecksumType = checksumType;
-
-            hasRepairedAt = version.compareTo("ka") >= 0;
-            tracksLegacyCounterShards = version.compareTo("ka") >= 0;
+            correspondingMessagingVersion = MessagingService.VERSION_30;
 
-            newFileName = version.compareTo("la") >= 0;
-
-            hasOldBfHashOrder = version.compareTo("ma") < 0;
-            hasCompactionAncestors = version.compareTo("ma") < 0;
-            storeRows = version.compareTo("ma") >= 0;
-            correspondingMessagingVersion = storeRows
-                                          ? MessagingService.VERSION_30
-                                          : MessagingService.VERSION_21;
-
-            hasBoundaries = version.compareTo("ma") < 0;
             hasCommitLogLowerBound = (version.compareTo("lb") >= 0 && version.compareTo("ma") < 0)
                                      || version.compareTo("mb") >= 0;
             hasCommitLogIntervals = version.compareTo("mc") >= 0;
@@ -197,60 +144,6 @@ public class BigFormat implements SSTableFormat
         }
 
         @Override
-        public boolean hasSamplingLevel()
-        {
-            return hasSamplingLevel;
-        }
-
-        @Override
-        public boolean hasNewStatsFile()
-        {
-            return newStatsFile;
-        }
-
-        @Override
-        public ChecksumType compressedChecksumType()
-        {
-            return compressedChecksumType;
-        }
-
-        @Override
-        public ChecksumType uncompressedChecksumType()
-        {
-            return uncompressedChecksumType;
-        }
-
-        @Override
-        public boolean hasRepairedAt()
-        {
-            return hasRepairedAt;
-        }
-
-        @Override
-        public boolean tracksLegacyCounterShards()
-        {
-            return tracksLegacyCounterShards;
-        }
-
-        @Override
-        public boolean hasOldBfHashOrder()
-        {
-            return hasOldBfHashOrder;
-        }
-
-        @Override
-        public boolean hasCompactionAncestors()
-        {
-            return hasCompactionAncestors;
-        }
-
-        @Override
-        public boolean hasNewFileName()
-        {
-            return newFileName;
-        }
-
-        @Override
         public boolean hasCommitLogLowerBound()
         {
             return hasCommitLogLowerBound;
@@ -263,24 +156,12 @@ public class BigFormat implements SSTableFormat
         }
 
         @Override
-        public boolean storeRows()
-        {
-            return storeRows;
-        }
-
-        @Override
         public int correspondingMessagingVersion()
         {
             return correspondingMessagingVersion;
         }
 
         @Override
-        public boolean hasBoundaries()
-        {
-            return hasBoundaries;
-        }
-
-        @Override
         public boolean isCompatible()
         {
             return version.compareTo(earliest_supported_version) >= 0 && version.charAt(0) <= current_version.charAt(0);

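Raising earliest_supported_version from "jb" to "ma" works because sstable version names compare lexicographically, and every 2.x name ("jb" through "lb") sorts below "ma". A quick check mirroring the isCompatible() logic shown above:

    public class VersionOrdering
    {
        static final String EARLIEST = "ma"; // new floor (3.0.0)
        static final String CURRENT  = "mc";

        // Mirrors BigVersion.isCompatible(): at or above the floor, major letter not newer than current.
        static boolean isCompatible(String v)
        {
            return v.compareTo(EARLIEST) >= 0 && v.charAt(0) <= CURRENT.charAt(0);
        }

        public static void main(String[] args)
        {
            System.out.println(isCompatible("jb")); // false: 2.0-era, now rejected
            System.out.println(isCompatible("lb")); // false: 2.2-era, now rejected
            System.out.println(isCompatible("mb")); // true
            System.out.println(isCompatible("mc")); // true
        }
    }
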
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java
index c3139a3..018edac 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java
@@ -84,7 +84,7 @@ public class BigTableWriter extends SSTableWriter
         {
             dataFile = new CompressedSequentialWriter(new File(getFilename()),
                                              descriptor.filenameFor(Component.COMPRESSION_INFO),
-                                             new File(descriptor.filenameFor(descriptor.digestComponent)),
+                                             new File(descriptor.filenameFor(Component.DIGEST)),
                                              writerOption,
                                              metadata.params.compression,
                                              metadataCollector);
@@ -93,7 +93,7 @@ public class BigTableWriter extends SSTableWriter
         {
             dataFile = new ChecksummedSequentialWriter(new File(getFilename()),
                     new File(descriptor.filenameFor(Component.CRC)),
-                    new File(descriptor.filenameFor(descriptor.digestComponent)),
+                    new File(descriptor.filenameFor(Component.DIGEST)),
                     writerOption);
         }
         dbuilder = new FileHandle.Builder(descriptor.filenameFor(Component.DATA)).compressed(compression)
@@ -442,7 +442,7 @@ public class BigTableWriter extends SSTableWriter
             builder = new FileHandle.Builder(descriptor.filenameFor(Component.PRIMARY_INDEX)).mmapped(DatabaseDescriptor.getIndexAccessMode() == Config.DiskAccessMode.mmap);
             chunkCache.ifPresent(builder::withChunkCache);
             summary = new IndexSummaryBuilder(keyCount, metadata.params.minIndexInterval, Downsampling.BASE_SAMPLING_LEVEL);
-            bf = FilterFactory.getFilter(keyCount, metadata.params.bloomFilterFpChance, true, descriptor.version.hasOldBfHashOrder());
+            bf = FilterFactory.getFilter(keyCount, metadata.params.bloomFilterFpChance, true);
             // register listeners to be alerted when the data files are flushed
             indexFile.setPostFlushListener(() -> summary.markIndexSynced(indexFile.getLastFlushOffset()));
             dataFile.setPostFlushListener(() -> summary.markDataSynced(dataFile.getLastFlushOffset()));

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/metadata/CompactionMetadata.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/metadata/CompactionMetadata.java b/src/java/org/apache/cassandra/io/sstable/metadata/CompactionMetadata.java
index ef3453a..c9dfe39 100644
--- a/src/java/org/apache/cassandra/io/sstable/metadata/CompactionMetadata.java
+++ b/src/java/org/apache/cassandra/io/sstable/metadata/CompactionMetadata.java
@@ -75,30 +75,17 @@ public class CompactionMetadata extends MetadataComponent
         public int serializedSize(Version version, CompactionMetadata component) throws IOException
         {
             int sz = 0;
-            if (version.hasCompactionAncestors())
-            {   // write empty ancestor marker
-                sz = 4;
-            }
             byte[] serializedCardinality = component.cardinalityEstimator.getBytes();
             return TypeSizes.sizeof(serializedCardinality.length) + serializedCardinality.length + sz;
         }
 
         public void serialize(Version version, CompactionMetadata component, DataOutputPlus out) throws IOException
         {
-            if (version.hasCompactionAncestors())
-            {   // write empty ancestor marker
-                out.writeInt(0);
-            }
             ByteBufferUtil.writeWithLength(component.cardinalityEstimator.getBytes(), out);
         }
 
         public CompactionMetadata deserialize(Version version, DataInputPlus in) throws IOException
         {
-            if (version.hasCompactionAncestors())
-            { // skip ancestors
-                int nbAncestors = in.readInt();
-                in.skipBytes(nbAncestors * TypeSizes.sizeof(nbAncestors));
-            }
             ICardinality cardinality = HyperLogLogPlus.Builder.build(ByteBufferUtil.readBytes(in, in.readInt()));
             return new CompactionMetadata(cardinality);
         }

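The removed branches wrote, sized and skipped a four-byte "empty ancestor" marker that only pre-"ma" sstables carried; the component is now just a length-prefixed cardinality blob. An illustrative before/after of the wire layout with plain java.io streams:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class CompactionMetadataLayout
    {
        static byte[] write(byte[] cardinality, boolean legacyAncestorMarker) throws IOException
        {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            DataOutputStream out = new DataOutputStream(bytes);
            if (legacyAncestorMarker)
                out.writeInt(0);              // the removed four-byte empty-ancestor marker
            out.writeInt(cardinality.length); // length-prefixed estimator blob
            out.write(cardinality);
            return bytes.toByteArray();
        }

        public static void main(String[] args) throws IOException
        {
            byte[] blob = { 1, 2, 3 };
            System.out.println(write(blob, true).length);  // 11: marker + length + blob
            System.out.println(write(blob, false).length); // 7: length + blob
        }
    }
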
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/metadata/LegacyMetadataSerializer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/metadata/LegacyMetadataSerializer.java b/src/java/org/apache/cassandra/io/sstable/metadata/LegacyMetadataSerializer.java
deleted file mode 100644
index 6cc33f5..0000000
--- a/src/java/org/apache/cassandra/io/sstable/metadata/LegacyMetadataSerializer.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.io.sstable.metadata;
-
-import java.io.*;
-import java.nio.ByteBuffer;
-import java.util.*;
-
-import org.apache.cassandra.db.TypeSizes;
-import org.apache.cassandra.db.commitlog.CommitLogPosition;
-import org.apache.cassandra.db.commitlog.IntervalSet;
-import org.apache.cassandra.io.sstable.Component;
-import org.apache.cassandra.io.sstable.Descriptor;
-import org.apache.cassandra.io.sstable.format.Version;
-import org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus;
-import org.apache.cassandra.io.util.DataOutputPlus;
-import org.apache.cassandra.service.ActiveRepairService;
-import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.EstimatedHistogram;
-import org.apache.cassandra.utils.StreamingHistogram;
-
-import static org.apache.cassandra.io.sstable.metadata.StatsMetadata.commitLogPositionSetSerializer;
-
-/**
- * Serializer for SSTable from legacy versions
- */
-@Deprecated
-public class LegacyMetadataSerializer extends MetadataSerializer
-{
-    /**
-     * Legacy serialization is only used for SSTable level reset.
-     */
-    @Override
-    public void serialize(Map<MetadataType, MetadataComponent> components, DataOutputPlus out, Version version) throws IOException
-    {
-        ValidationMetadata validation = (ValidationMetadata) components.get(MetadataType.VALIDATION);
-        StatsMetadata stats = (StatsMetadata) components.get(MetadataType.STATS);
-        CompactionMetadata compaction = (CompactionMetadata) components.get(MetadataType.COMPACTION);
-
-        assert validation != null && stats != null && compaction != null && validation.partitioner != null;
-
-        EstimatedHistogram.serializer.serialize(stats.estimatedPartitionSize, out);
-        EstimatedHistogram.serializer.serialize(stats.estimatedColumnCount, out);
-        CommitLogPosition.serializer.serialize(stats.commitLogIntervals.upperBound().orElse(CommitLogPosition.NONE), out);
-        out.writeLong(stats.minTimestamp);
-        out.writeLong(stats.maxTimestamp);
-        out.writeInt(stats.maxLocalDeletionTime);
-        out.writeDouble(validation.bloomFilterFPChance);
-        out.writeDouble(stats.compressionRatio);
-        out.writeUTF(validation.partitioner);
-        out.writeInt(0); // compaction ancestors
-        StreamingHistogram.serializer.serialize(stats.estimatedTombstoneDropTime, out);
-        out.writeInt(stats.sstableLevel);
-        out.writeInt(stats.minClusteringValues.size());
-        for (ByteBuffer value : stats.minClusteringValues)
-            ByteBufferUtil.writeWithShortLength(value, out);
-        out.writeInt(stats.maxClusteringValues.size());
-        for (ByteBuffer value : stats.maxClusteringValues)
-            ByteBufferUtil.writeWithShortLength(value, out);
-        if (version.hasCommitLogLowerBound())
-            CommitLogPosition.serializer.serialize(stats.commitLogIntervals.lowerBound().orElse(CommitLogPosition.NONE), out);
-        if (version.hasCommitLogIntervals())
-            commitLogPositionSetSerializer.serialize(stats.commitLogIntervals, out);
-    }
-
-    /**
-     * Legacy serializer deserialize all components no matter what types are specified.
-     */
-    @Override
-    public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, EnumSet<MetadataType> types) throws IOException
-    {
-        Map<MetadataType, MetadataComponent> components = new EnumMap<>(MetadataType.class);
-
-        File statsFile = new File(descriptor.filenameFor(Component.STATS));
-        if (!statsFile.exists() && types.contains(MetadataType.STATS))
-        {
-            components.put(MetadataType.STATS, MetadataCollector.defaultStatsMetadata());
-        }
-        else
-        {
-            try (DataInputStreamPlus in = new DataInputStreamPlus(new BufferedInputStream(new FileInputStream(statsFile))))
-            {
-                EstimatedHistogram partitionSizes = EstimatedHistogram.serializer.deserialize(in);
-                EstimatedHistogram columnCounts = EstimatedHistogram.serializer.deserialize(in);
-                CommitLogPosition commitLogLowerBound = CommitLogPosition.NONE;
-                CommitLogPosition commitLogUpperBound = CommitLogPosition.serializer.deserialize(in);
-                long minTimestamp = in.readLong();
-                long maxTimestamp = in.readLong();
-                int maxLocalDeletionTime = in.readInt();
-                double bloomFilterFPChance = in.readDouble();
-                double compressionRatio = in.readDouble();
-                String partitioner = in.readUTF();
-                int nbAncestors = in.readInt(); //skip compaction ancestors
-                in.skipBytes(nbAncestors * TypeSizes.sizeof(nbAncestors));
-                StreamingHistogram tombstoneHistogram = StreamingHistogram.serializer.deserialize(in);
-                int sstableLevel = 0;
-                if (in.available() > 0)
-                    sstableLevel = in.readInt();
-
-                int colCount = in.readInt();
-                List<ByteBuffer> minColumnNames = new ArrayList<>(colCount);
-                for (int i = 0; i < colCount; i++)
-                    minColumnNames.add(ByteBufferUtil.readWithShortLength(in));
-
-                colCount = in.readInt();
-                List<ByteBuffer> maxColumnNames = new ArrayList<>(colCount);
-                for (int i = 0; i < colCount; i++)
-                    maxColumnNames.add(ByteBufferUtil.readWithShortLength(in));
-
-                if (descriptor.version.hasCommitLogLowerBound())
-                    commitLogLowerBound = CommitLogPosition.serializer.deserialize(in);
-                IntervalSet<CommitLogPosition> commitLogIntervals;
-                if (descriptor.version.hasCommitLogIntervals())
-                    commitLogIntervals = commitLogPositionSetSerializer.deserialize(in);
-                else
-                    commitLogIntervals = new IntervalSet<>(commitLogLowerBound, commitLogUpperBound);
-
-                if (types.contains(MetadataType.VALIDATION))
-                    components.put(MetadataType.VALIDATION,
-                                   new ValidationMetadata(partitioner, bloomFilterFPChance));
-                if (types.contains(MetadataType.STATS))
-                    components.put(MetadataType.STATS,
-                                   new StatsMetadata(partitionSizes,
-                                                     columnCounts,
-                                                     commitLogIntervals,
-                                                     minTimestamp,
-                                                     maxTimestamp,
-                                                     Integer.MAX_VALUE,
-                                                     maxLocalDeletionTime,
-                                                     0,
-                                                     Integer.MAX_VALUE,
-                                                     compressionRatio,
-                                                     tombstoneHistogram,
-                                                     sstableLevel,
-                                                     minColumnNames,
-                                                     maxColumnNames,
-                                                     true,
-                                                     ActiveRepairService.UNREPAIRED_SSTABLE,
-                                                     -1,
-                                                     -1));
-                if (types.contains(MetadataType.COMPACTION))
-                    components.put(MetadataType.COMPACTION,
-                                   new CompactionMetadata(null));
-            }
-        }
-        return components;
-    }
-}
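
The javadoc above is literal: the legacy stats format has no per-component index, so every field must be read in order even when only one component was requested. A minimal sketch (illustrative Java, not Cassandra code) of why a purely sequential layout forces this:

    import java.io.DataInputStream;
    import java.io.IOException;

    class SequentialFormatReader
    {
        // Hypothetical two-component layout: [histogram length][histogram bytes][level int].
        // Reaching the level requires measuring and skipping everything serialized
        // before it, because component offsets are not recorded anywhere.
        static int readLevelOnly(DataInputStream in) throws IOException
        {
            int histogramLength = in.readInt();
            in.skipBytes(histogramLength);
            return in.readInt();
        }
    }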

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java b/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java
index c83c2cf..0f6434b 100644
--- a/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java
+++ b/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java
@@ -236,10 +236,7 @@ public class StatsMetadata extends MetadataComponent
             size += EstimatedHistogram.serializer.serializedSize(component.estimatedPartitionSize);
             size += EstimatedHistogram.serializer.serializedSize(component.estimatedColumnCount);
             size += CommitLogPosition.serializer.serializedSize(component.commitLogIntervals.upperBound().orElse(CommitLogPosition.NONE));
-            if (version.storeRows())
-                size += 8 + 8 + 4 + 4 + 4 + 4 + 8 + 8; // min/max timestamp(long), min/maxLocalDeletionTime(int), min/max TTL, compressionRatio(double), repairedAt (long)
-            else
-                size += 8 + 8 + 4 + 8 + 8; // min/max timestamp(long), maxLocalDeletionTime(int), compressionRatio(double), repairedAt (long)
+            size += 8 + 8 + 4 + 4 + 4 + 4 + 8 + 8; // min/max timestamp(long), min/maxLocalDeletionTime(int), min/max TTL, compressionRatio(double), repairedAt (long)
             size += StreamingHistogram.serializer.serializedSize(component.estimatedTombstoneDropTime);
             size += TypeSizes.sizeof(component.sstableLevel);
             // min column names
@@ -251,8 +248,7 @@ public class StatsMetadata extends MetadataComponent
             for (ByteBuffer value : component.maxClusteringValues)
                 size += 2 + value.remaining(); // with short length
             size += TypeSizes.sizeof(component.hasLegacyCounterShards);
-            if (version.storeRows())
-                size += 8 + 8; // totalColumnsSet, totalRows
+            size += 8 + 8; // totalColumnsSet, totalRows
             if (version.hasCommitLogLowerBound())
                 size += CommitLogPosition.serializer.serializedSize(component.commitLogIntervals.lowerBound().orElse(CommitLogPosition.NONE));
             if (version.hasCommitLogIntervals())
@@ -267,14 +263,10 @@ public class StatsMetadata extends MetadataComponent
             CommitLogPosition.serializer.serialize(component.commitLogIntervals.upperBound().orElse(CommitLogPosition.NONE), out);
             out.writeLong(component.minTimestamp);
             out.writeLong(component.maxTimestamp);
-            if (version.storeRows())
-                out.writeInt(component.minLocalDeletionTime);
+            out.writeInt(component.minLocalDeletionTime);
             out.writeInt(component.maxLocalDeletionTime);
-            if (version.storeRows())
-            {
-                out.writeInt(component.minTTL);
-                out.writeInt(component.maxTTL);
-            }
+            out.writeInt(component.minTTL);
+            out.writeInt(component.maxTTL);
             out.writeDouble(component.compressionRatio);
             StreamingHistogram.serializer.serialize(component.estimatedTombstoneDropTime, out);
             out.writeInt(component.sstableLevel);
@@ -287,11 +279,8 @@ public class StatsMetadata extends MetadataComponent
                 ByteBufferUtil.writeWithShortLength(value, out);
             out.writeBoolean(component.hasLegacyCounterShards);
 
-            if (version.storeRows())
-            {
-                out.writeLong(component.totalColumnsSet);
-                out.writeLong(component.totalRows);
-            }
+            out.writeLong(component.totalColumnsSet);
+            out.writeLong(component.totalRows);
 
             if (version.hasCommitLogLowerBound())
                 CommitLogPosition.serializer.serialize(component.commitLogIntervals.lowerBound().orElse(CommitLogPosition.NONE), out);
@@ -307,17 +296,14 @@ public class StatsMetadata extends MetadataComponent
             commitLogUpperBound = CommitLogPosition.serializer.deserialize(in);
             long minTimestamp = in.readLong();
             long maxTimestamp = in.readLong();
-            // We use MAX_VALUE as that's the default value for "no deletion time"
-            int minLocalDeletionTime = version.storeRows() ? in.readInt() : Integer.MAX_VALUE;
+            int minLocalDeletionTime = in.readInt();
             int maxLocalDeletionTime = in.readInt();
-            int minTTL = version.storeRows() ? in.readInt() : 0;
-            int maxTTL = version.storeRows() ? in.readInt() : Integer.MAX_VALUE;
+            int minTTL = in.readInt();
+            int maxTTL = in.readInt();
             double compressionRatio = in.readDouble();
             StreamingHistogram tombstoneHistogram = StreamingHistogram.serializer.deserialize(in);
             int sstableLevel = in.readInt();
-            long repairedAt = 0;
-            if (version.hasRepairedAt())
-                repairedAt = in.readLong();
+            long repairedAt = in.readLong();
 
             int colCount = in.readInt();
             List<ByteBuffer> minClusteringValues = new ArrayList<>(colCount);
@@ -329,12 +315,10 @@ public class StatsMetadata extends MetadataComponent
             for (int i = 0; i < colCount; i++)
                 maxClusteringValues.add(ByteBufferUtil.readWithShortLength(in));
 
-            boolean hasLegacyCounterShards = true;
-            if (version.tracksLegacyCounterShards())
-                hasLegacyCounterShards = in.readBoolean();
+            boolean hasLegacyCounterShards = in.readBoolean();
 
-            long totalColumnsSet = version.storeRows() ? in.readLong() : -1L;
-            long totalRows = version.storeRows() ? in.readLong() : -1L;
+            long totalColumnsSet = in.readLong();
+            long totalRows = in.readLong();
 
             if (version.hasCommitLogLowerBound())
                 commitLogLowerBound = CommitLogPosition.serializer.deserialize(in);
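
For the size accounting above, the now-unconditional fixed-width block totals 48 bytes: two longs for the timestamps (16), two ints for the local deletion times (8), two ints for the TTLs (8), one double for the compression ratio (8), and one long for repairedAt (8). A minimal sketch of the same arithmetic:

    class StatsFixedWidth
    {
        // 8 + 8 + 4 + 4 + 4 + 4 + 8 + 8 = 48, matching the serializedSize comment above
        static final int FIXED_BYTES = Long.BYTES * 2    // min/max timestamp
                                     + Integer.BYTES * 2 // min/max localDeletionTime
                                     + Integer.BYTES * 2 // min/max TTL
                                     + Double.BYTES      // compressionRatio
                                     + Long.BYTES;       // repairedAt
    }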

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/util/CompressedChunkReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/util/CompressedChunkReader.java b/src/java/org/apache/cassandra/io/util/CompressedChunkReader.java
index 8f00ce7..219f0eb 100644
--- a/src/java/org/apache/cassandra/io/util/CompressedChunkReader.java
+++ b/src/java/org/apache/cassandra/io/util/CompressedChunkReader.java
@@ -29,6 +29,7 @@ import org.apache.cassandra.io.compress.BufferType;
 import org.apache.cassandra.io.compress.CompressionMetadata;
 import org.apache.cassandra.io.compress.CorruptBlockException;
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
+import org.apache.cassandra.utils.ChecksumType;
 
 public abstract class CompressedChunkReader extends AbstractReaderFileProxy implements ChunkReader
 {
@@ -142,7 +143,7 @@ public abstract class CompressedChunkReader extends AbstractReaderFileProxy impl
                 if (getCrcCheckChance() > ThreadLocalRandom.current().nextDouble())
                 {
                     compressed.rewind();
-                    int checksum = (int) metadata.checksumType.of(compressed);
+                    int checksum = (int) ChecksumType.CRC32.of(compressed);
 
                     compressed.clear().limit(Integer.BYTES);
                     if (channel.read(compressed, chunk.offset + chunk.length) != Integer.BYTES
@@ -204,7 +205,7 @@ public abstract class CompressedChunkReader extends AbstractReaderFileProxy impl
                 {
                     compressedChunk.position(chunkOffset).limit(chunkOffset + chunk.length);
 
-                    int checksum = (int) metadata.checksumType.of(compressedChunk);
+                    int checksum = (int) ChecksumType.CRC32.of(compressedChunk);
 
                     compressedChunk.limit(compressedChunk.capacity());
                     if (compressedChunk.getInt() != checksum)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/io/util/DataIntegrityMetadata.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/util/DataIntegrityMetadata.java b/src/java/org/apache/cassandra/io/util/DataIntegrityMetadata.java
index cee23c9..91b189d 100644
--- a/src/java/org/apache/cassandra/io/util/DataIntegrityMetadata.java
+++ b/src/java/org/apache/cassandra/io/util/DataIntegrityMetadata.java
@@ -44,7 +44,7 @@ public class DataIntegrityMetadata
 
         public ChecksumValidator(Descriptor descriptor) throws IOException
         {
-            this(descriptor.version.uncompressedChecksumType(),
+            this(ChecksumType.CRC32,
                  RandomAccessReader.open(new File(descriptor.filenameFor(Component.CRC))),
                  descriptor.filenameFor(Component.DATA));
         }
@@ -99,8 +99,8 @@ public class DataIntegrityMetadata
         public FileDigestValidator(Descriptor descriptor) throws IOException
         {
             this.descriptor = descriptor;
-            checksum = descriptor.version.uncompressedChecksumType().newInstance();
-            digestReader = RandomAccessReader.open(new File(descriptor.filenameFor(Component.digestFor(descriptor.version.uncompressedChecksumType()))));
+            checksum = ChecksumType.CRC32.newInstance();
+            digestReader = RandomAccessReader.open(new File(descriptor.filenameFor(Component.DIGEST)));
             dataReader = RandomAccessReader.open(new File(descriptor.filenameFor(Component.DATA)));
             try
             {
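
Both diffs above replace the per-sstable-version checksum lookup with a fixed CRC32, since 4.0 only reads sstables written with CRC32. A minimal sketch, assuming java.util.zip.CRC32, of the verification being pinned:

    import java.nio.ByteBuffer;
    import java.util.zip.CRC32;

    class Crc32Verify
    {
        // Hash the chunk and compare with the stored 4-byte checksum; duplicate()
        // leaves the caller's buffer position untouched.
        static boolean matches(ByteBuffer chunk, int storedChecksum)
        {
            CRC32 crc = new CRC32();
            crc.update(chunk.duplicate());
            return (int) crc.getValue() == storedChecksum;
        }
    }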

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/net/IncomingTcpConnection.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/net/IncomingTcpConnection.java b/src/java/org/apache/cassandra/net/IncomingTcpConnection.java
index 9878590..53e53a4 100644
--- a/src/java/org/apache/cassandra/net/IncomingTcpConnection.java
+++ b/src/java/org/apache/cassandra/net/IncomingTcpConnection.java
@@ -86,9 +86,9 @@ public class IncomingTcpConnection extends FastThreadLocalThread implements Clos
     {
         try
         {
-            if (version < MessagingService.VERSION_20)
+            if (version < MessagingService.VERSION_30)
                 throw new UnsupportedOperationException(String.format("Unable to read obsolete message version %s; "
-                                                                      + "The earliest version supported is 2.0.0",
+                                                                      + "The earliest version supported is 3.0.0",
                                                                       version));
 
             receiveMessages();
@@ -155,18 +155,11 @@ public class IncomingTcpConnection extends FastThreadLocalThread implements Clos
         if (compressed)
         {
             logger.trace("Upgrading incoming connection to be compressed");
-            if (version < MessagingService.VERSION_21)
-            {
-                in = new DataInputStreamPlus(new SnappyInputStream(socket.getInputStream()));
-            }
-            else
-            {
-                LZ4FastDecompressor decompressor = LZ4Factory.fastestInstance().fastDecompressor();
-                Checksum checksum = XXHashFactory.fastestInstance().newStreamingHash32(OutboundTcpConnection.LZ4_HASH_SEED).asChecksum();
-                in = new DataInputStreamPlus(new LZ4BlockInputStream(socket.getInputStream(),
-                                                                 decompressor,
-                                                                 checksum));
-            }
+            LZ4FastDecompressor decompressor = LZ4Factory.fastestInstance().fastDecompressor();
+            Checksum checksum = XXHashFactory.fastestInstance().newStreamingHash32(OutboundTcpConnection.LZ4_HASH_SEED).asChecksum();
+            in = new DataInputStreamPlus(new LZ4BlockInputStream(socket.getInputStream(),
+                                                             decompressor,
+                                                             checksum));
         }
         else
         {
@@ -183,11 +176,8 @@ public class IncomingTcpConnection extends FastThreadLocalThread implements Clos
 
     private InetAddress receiveMessage(DataInputPlus input, int version) throws IOException
     {
-        int id;
-        if (version < MessagingService.VERSION_20)
-            id = Integer.parseInt(input.readUTF());
-        else
-            id = input.readInt();
+        int id = input.readInt();
+
         long currentTime = ApproximateTime.currentTimeMillis();
         MessageIn message = MessageIn.read(input, version, id, MessageIn.readConstructionTime(from, input, currentTime));
         if (message == null)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/net/MessageOut.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/net/MessageOut.java b/src/java/org/apache/cassandra/net/MessageOut.java
index 4f41ee5..94019f2 100644
--- a/src/java/org/apache/cassandra/net/MessageOut.java
+++ b/src/java/org/apache/cassandra/net/MessageOut.java
@@ -104,7 +104,7 @@ public class MessageOut<T>
     {
         CompactEndpointSerializationHelper.serialize(from, out);
 
-        out.writeInt(MessagingService.Verb.convertForMessagingServiceVersion(verb, version).ordinal());
+        out.writeInt(verb.ordinal());
         out.writeInt(parameters.size());
         for (Map.Entry<String, byte[]> entry : parameters.entrySet())
         {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/net/MessagingService.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/net/MessagingService.java b/src/java/org/apache/cassandra/net/MessagingService.java
index f82e80b..38c1cd2 100644
--- a/src/java/org/apache/cassandra/net/MessagingService.java
+++ b/src/java/org/apache/cassandra/net/MessagingService.java
@@ -88,10 +88,6 @@ public final class MessagingService implements MessagingServiceMBean
     public static final String MBEAN_NAME = "org.apache.cassandra.net:type=MessagingService";
 
     // 8 bits version, so don't waste versions
-    public static final int VERSION_12 = 6;
-    public static final int VERSION_20 = 7;
-    public static final int VERSION_21 = 8;
-    public static final int VERSION_22 = 9;
     public static final int VERSION_30 = 10;
     public static final int current_version = VERSION_30;
 
@@ -105,9 +101,6 @@ public final class MessagingService implements MessagingServiceMBean
      */
     public static final int PROTOCOL_MAGIC = 0xCA552DFA;
 
-    private boolean allNodesAtLeast22 = true;
-    private boolean allNodesAtLeast30 = true;
-
     public final MessagingMetrics metrics = new MessagingMetrics();
 
     /* All verb handler identifiers */
@@ -236,16 +229,6 @@ public final class MessagingService implements MessagingServiceMBean
         UNUSED_5,
         ;
 
-        // This is to support a "late" choice of the verb based on the messaging service version.
-        // See CASSANDRA-12249 for more details.
-        public static Verb convertForMessagingServiceVersion(Verb verb, int version)
-        {
-            if (verb == PAGED_RANGE && version >= VERSION_30)
-                return RANGE_SLICE;
-
-            return verb;
-        }
-
         public long getTimeout()
         {
             return DatabaseDescriptor.getRpcTimeout();
@@ -319,9 +302,9 @@ public final class MessagingService implements MessagingServiceMBean
 
         put(Verb.MUTATION, Mutation.serializer);
         put(Verb.READ_REPAIR, Mutation.serializer);
-        put(Verb.READ, ReadCommand.readSerializer);
-        put(Verb.RANGE_SLICE, ReadCommand.rangeSliceSerializer);
-        put(Verb.PAGED_RANGE, ReadCommand.pagedRangeSerializer);
+        put(Verb.READ, ReadCommand.serializer);
+        put(Verb.RANGE_SLICE, ReadCommand.serializer);
+        put(Verb.PAGED_RANGE, ReadCommand.serializer);
         put(Verb.BOOTSTRAP_TOKEN, BootStrapper.StringSerializer.instance);
         put(Verb.REPAIR_MESSAGE, RepairMessage.serializer);
         put(Verb.GOSSIP_DIGEST_ACK, GossipDigestAck.serializer);
@@ -350,8 +333,8 @@ public final class MessagingService implements MessagingServiceMBean
         put(Verb.HINT, HintResponse.serializer);
         put(Verb.READ_REPAIR, WriteResponse.serializer);
         put(Verb.COUNTER_MUTATION, WriteResponse.serializer);
-        put(Verb.RANGE_SLICE, ReadResponse.rangeSliceSerializer);
-        put(Verb.PAGED_RANGE, ReadResponse.rangeSliceSerializer);
+        put(Verb.RANGE_SLICE, ReadResponse.serializer);
+        put(Verb.PAGED_RANGE, ReadResponse.serializer);
         put(Verb.READ, ReadResponse.serializer);
         put(Verb.TRUNCATE, TruncateResponse.serializer);
         put(Verb.SNAPSHOT, null);
@@ -1041,16 +1024,6 @@ public final class MessagingService implements MessagingServiceMBean
         return packed >>> (start + 1) - count & ~(-1 << count);
     }
 
-    public boolean areAllNodesAtLeast22()
-    {
-        return allNodesAtLeast22;
-    }
-
-    public boolean areAllNodesAtLeast30()
-    {
-        return allNodesAtLeast30;
-    }
-
     /**
      * @return the last version associated with address, or @param version if this is the first such version
      */
@@ -1058,50 +1031,16 @@ public final class MessagingService implements MessagingServiceMBean
     {
         // We can't talk to someone from the future
         version = Math.min(version, current_version);
-
         logger.trace("Setting version {} for {}", version, endpoint);
 
-        if (version < VERSION_22)
-            allNodesAtLeast22 = false;
-        if (version < VERSION_30)
-            allNodesAtLeast30 = false;
-
         Integer v = versions.put(endpoint, version);
-
-        // if the version was increased to 2.2 or later see if the min version across the cluster has changed
-        if (v != null && (v < VERSION_30 && version >= VERSION_22))
-            refreshAllNodeMinVersions();
-
         return v == null ? version : v;
     }
 
     public void resetVersion(InetAddress endpoint)
     {
         logger.trace("Resetting version for {}", endpoint);
-        Integer removed = versions.remove(endpoint);
-        if (removed != null && removed <= VERSION_30)
-            refreshAllNodeMinVersions();
-    }
-
-    private void refreshAllNodeMinVersions()
-    {
-        boolean anyNodeLowerThan30 = false;
-        for (Integer version : versions.values())
-        {
-            if (version < MessagingService.VERSION_30)
-            {
-                anyNodeLowerThan30 = true;
-                allNodesAtLeast30 = false;
-            }
-
-            if (version < MessagingService.VERSION_22)
-            {
-                allNodesAtLeast22 = false;
-                return;
-            }
-        }
-        allNodesAtLeast22 = true;
-        allNodesAtLeast30 = !anyNodeLowerThan30;
+        versions.remove(endpoint);
     }
 
     public int getVersion(InetAddress endpoint)
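
With only one supported messaging version left, setVersion above reduces to recording the peer's version, clamped so that a node never speaks a version newer than its own. A minimal sketch (constant illustrative) of the clamping rule:

    class VersionNegotiation
    {
        static final int CURRENT_VERSION = 10; // VERSION_30 in the diff above

        // "We can't talk to someone from the future": a peer advertising a newer
        // version than we implement is recorded at our own version instead.
        static int clamp(int peerAdvertisedVersion)
        {
            return Math.min(peerAdvertisedVersion, CURRENT_VERSION);
        }
    }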

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/net/OutboundTcpConnection.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/net/OutboundTcpConnection.java b/src/java/org/apache/cassandra/net/OutboundTcpConnection.java
index 1f47334..c32154e 100644
--- a/src/java/org/apache/cassandra/net/OutboundTcpConnection.java
+++ b/src/java/org/apache/cassandra/net/OutboundTcpConnection.java
@@ -336,11 +336,7 @@ public class OutboundTcpConnection extends FastThreadLocalThread
     private void writeInternal(MessageOut message, int id, long timestamp) throws IOException
     {
         out.writeInt(MessagingService.PROTOCOL_MAGIC);
-
-        if (targetVersion < MessagingService.VERSION_20)
-            out.writeUTF(String.valueOf(id));
-        else
-            out.writeInt(id);
+        out.writeInt(id);
 
         // int cast cuts off the high-order half of the timestamp, which we can assume remains
         // the same between now and when the recipient reconstructs it.
@@ -427,9 +423,7 @@ public class OutboundTcpConnection extends FastThreadLocalThread
                 int maxTargetVersion = handshakeVersion(in);
                 if (maxTargetVersion == NO_VERSION)
                 {
-                    // no version is returned, so disconnect and try again: we will either get
-                    // a different target version (targetVersion < MessagingService.VERSION_12)
-                    // or if the same version the handshake will finally succeed
+                    // no version is returned, so disconnect and try again
                     logger.trace("Target max version is {}; no version information yet, will retry", maxTargetVersion);
                     if (DatabaseDescriptor.getSeeds().contains(poolReference.endPoint()))
                         logger.warn("Seed gossip version is {}; will not connect with that version", maxTargetVersion);
@@ -461,22 +455,15 @@ public class OutboundTcpConnection extends FastThreadLocalThread
                 {
                     out.flush();
                     logger.trace("Upgrading OutputStream to {} to be compressed", poolReference.endPoint());
-                    if (targetVersion < MessagingService.VERSION_21)
-                    {
-                        // Snappy is buffered, so no need for extra buffering output stream
-                        out = new WrappedDataOutputStreamPlus(new SnappyOutputStream(socket.getOutputStream()));
-                    }
-                    else
-                    {
-                        // TODO: custom LZ4 OS that supports BB write methods
-                        LZ4Compressor compressor = LZ4Factory.fastestInstance().fastCompressor();
-                        Checksum checksum = XXHashFactory.fastestInstance().newStreamingHash32(LZ4_HASH_SEED).asChecksum();
-                        out = new WrappedDataOutputStreamPlus(new LZ4BlockOutputStream(socket.getOutputStream(),
-                                                                            1 << 14,  // 16k block size
-                                                                            compressor,
-                                                                            checksum,
-                                                                            true)); // no async flushing
-                    }
+
+                    // TODO: custom LZ4 OS that supports BB write methods
+                    LZ4Compressor compressor = LZ4Factory.fastestInstance().fastCompressor();
+                    Checksum checksum = XXHashFactory.fastestInstance().newStreamingHash32(LZ4_HASH_SEED).asChecksum();
+                    out = new WrappedDataOutputStreamPlus(new LZ4BlockOutputStream(socket.getOutputStream(),
+                                                                        1 << 14,  // 16k block size
+                                                                        compressor,
+                                                                        checksum,
+                                                                        true)); // no async flushing
                 }
                 logger.debug("Done connecting to {}", poolReference.endPoint());
                 return true;
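
With the Snappy fallback gone, both connection directions always wrap the socket in an LZ4 block stream. A minimal sketch (seed value hypothetical; the real one is OutboundTcpConnection.LZ4_HASH_SEED) of the symmetric setup the two diffs rely on — the compressor/decompressor pair and the xxHash seed must match on both ends or block framing fails:

    import java.io.InputStream;
    import java.io.OutputStream;
    import java.util.zip.Checksum;
    import net.jpountz.lz4.LZ4BlockInputStream;
    import net.jpountz.lz4.LZ4BlockOutputStream;
    import net.jpountz.lz4.LZ4Factory;
    import net.jpountz.xxhash.XXHashFactory;

    class Lz4StreamPair
    {
        static final int SEED = 0x12345678; // hypothetical; must equal the sender's seed

        static OutputStream compress(OutputStream raw)
        {
            Checksum checksum = XXHashFactory.fastestInstance().newStreamingHash32(SEED).asChecksum();
            return new LZ4BlockOutputStream(raw, 1 << 14, // 16k blocks, as in the diff
                                            LZ4Factory.fastestInstance().fastCompressor(),
                                            checksum, true);
        }

        static InputStream decompress(InputStream raw)
        {
            Checksum checksum = XXHashFactory.fastestInstance().newStreamingHash32(SEED).asChecksum();
            return new LZ4BlockInputStream(raw, LZ4Factory.fastestInstance().fastDecompressor(), checksum);
        }
    }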

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/repair/RepairJobDesc.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/repair/RepairJobDesc.java b/src/java/org/apache/cassandra/repair/RepairJobDesc.java
index 05adbf9..be3daef 100644
--- a/src/java/org/apache/cassandra/repair/RepairJobDesc.java
+++ b/src/java/org/apache/cassandra/repair/RepairJobDesc.java
@@ -93,12 +93,10 @@ public class RepairJobDesc
     {
         public void serialize(RepairJobDesc desc, DataOutputPlus out, int version) throws IOException
         {
-            if (version >= MessagingService.VERSION_21)
-            {
-                out.writeBoolean(desc.parentSessionId != null);
-                if (desc.parentSessionId != null)
-                    UUIDSerializer.serializer.serialize(desc.parentSessionId, out, version);
-            }
+            out.writeBoolean(desc.parentSessionId != null);
+            if (desc.parentSessionId != null)
+                UUIDSerializer.serializer.serialize(desc.parentSessionId, out, version);
+
             UUIDSerializer.serializer.serialize(desc.sessionId, out, version);
             out.writeUTF(desc.keyspace);
             out.writeUTF(desc.columnFamily);
@@ -111,11 +109,8 @@ public class RepairJobDesc
         public RepairJobDesc deserialize(DataInputPlus in, int version) throws IOException
         {
             UUID parentSessionId = null;
-            if (version >= MessagingService.VERSION_21)
-            {
-                if (in.readBoolean())
-                    parentSessionId = UUIDSerializer.serializer.deserialize(in, version);
-            }
+            if (in.readBoolean())
+                parentSessionId = UUIDSerializer.serializer.deserialize(in, version);
             UUID sessionId = UUIDSerializer.serializer.deserialize(in, version);
             String keyspace = in.readUTF();
             String columnFamily = in.readUTF();
@@ -136,13 +131,9 @@ public class RepairJobDesc
 
         public long serializedSize(RepairJobDesc desc, int version)
         {
-            int size = 0;
-            if (version >= MessagingService.VERSION_21)
-            {
-                size += TypeSizes.sizeof(desc.parentSessionId != null);
-                if (desc.parentSessionId != null)
-                    size += UUIDSerializer.serializer.serializedSize(desc.parentSessionId, version);
-            }
+            int size = TypeSizes.sizeof(desc.parentSessionId != null);
+            if (desc.parentSessionId != null)
+                size += UUIDSerializer.serializer.serializedSize(desc.parentSessionId, version);
             size += UUIDSerializer.serializer.serializedSize(desc.sessionId, version);
             size += TypeSizes.sizeof(desc.keyspace);
             size += TypeSizes.sizeof(desc.columnFamily);
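
The RepairJobDesc change keeps the presence-flag encoding for the nullable parent session id while dropping the version gate. A minimal sketch (names illustrative) of the pattern: one boolean, then the value only when present, with serializedSize mirroring serialize exactly:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import java.util.UUID;

    class NullableUuidCodec
    {
        static void serialize(UUID value, DataOutput out) throws IOException
        {
            out.writeBoolean(value != null);
            if (value != null)
            {
                out.writeLong(value.getMostSignificantBits());
                out.writeLong(value.getLeastSignificantBits());
            }
        }

        static UUID deserialize(DataInput in) throws IOException
        {
            return in.readBoolean() ? new UUID(in.readLong(), in.readLong()) : null;
        }

        static long serializedSize(UUID value)
        {
            return 1 + (value != null ? 16 : 0); // boolean + two longs when present
        }
    }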

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/repair/Validator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/repair/Validator.java b/src/java/org/apache/cassandra/repair/Validator.java
index a2a2512..e20995e 100644
--- a/src/java/org/apache/cassandra/repair/Validator.java
+++ b/src/java/org/apache/cassandra/repair/Validator.java
@@ -218,7 +218,7 @@ public class Validator implements Runnable
         validated++;
         // MerkleTree uses XOR internally, so we want lots of output bits here
         CountingDigest digest = new CountingDigest(FBUtilities.newMessageDigest("SHA-256"));
-        UnfilteredRowIterators.digest(null, partition, digest, MessagingService.current_version);
+        UnfilteredRowIterators.digest(partition, digest, MessagingService.current_version);
         // only return new hash for merkle tree in case digest was updated - see CASSANDRA-8979
         return digest.count > 0
              ? new MerkleTree.RowHash(partition.partitionKey().getToken(), digest.digest(), digest.count)


[09/11] cassandra git commit: Remove pre-3.0 compatibility code for 4.0

Posted by sl...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/Serializers.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Serializers.java b/src/java/org/apache/cassandra/db/Serializers.java
deleted file mode 100644
index d6aac64..0000000
--- a/src/java/org/apache/cassandra/db/Serializers.java
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.db;
-
-import java.io.*;
-import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.apache.cassandra.config.CFMetaData;
-import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.db.marshal.CompositeType;
-import org.apache.cassandra.io.ISerializer;
-import org.apache.cassandra.io.sstable.IndexInfo;
-import org.apache.cassandra.io.sstable.format.big.BigFormat;
-import org.apache.cassandra.io.util.DataInputPlus;
-import org.apache.cassandra.io.util.DataOutputPlus;
-import org.apache.cassandra.io.sstable.format.Version;
-import org.apache.cassandra.utils.ByteBufferUtil;
-
-/**
- * Holds references on serializers that depend on the table definition.
- */
-public class Serializers
-{
-    private final CFMetaData metadata;
-
-    private Map<Version, IndexInfo.Serializer> otherVersionClusteringSerializers;
-
-    private final IndexInfo.Serializer latestVersionIndexSerializer;
-
-    public Serializers(CFMetaData metadata)
-    {
-        this.metadata = metadata;
-        this.latestVersionIndexSerializer = new IndexInfo.Serializer(BigFormat.latestVersion,
-                                                                     indexEntryClusteringPrefixSerializer(BigFormat.latestVersion, SerializationHeader.makeWithoutStats(metadata)));
-    }
-
-    IndexInfo.Serializer indexInfoSerializer(Version version, SerializationHeader header)
-    {
-        // null header indicates streaming from pre-3.0 sstables
-        if (version.equals(BigFormat.latestVersion) && header != null)
-            return latestVersionIndexSerializer;
-
-        if (otherVersionClusteringSerializers == null)
-            otherVersionClusteringSerializers = new ConcurrentHashMap<>();
-        IndexInfo.Serializer serializer = otherVersionClusteringSerializers.get(version);
-        if (serializer == null)
-        {
-            serializer = new IndexInfo.Serializer(version,
-                                                  indexEntryClusteringPrefixSerializer(version, header));
-            otherVersionClusteringSerializers.put(version, serializer);
-        }
-        return serializer;
-    }
-
-    // TODO: Once we drop support for old (pre-3.0) sstables, we can drop this method and inline the calls to
-    // ClusteringPrefix.serializer directly. At which point this whole class probably becomes
-    // unnecessary (since IndexInfo.Serializer won't depend on the metadata either).
-    private ISerializer<ClusteringPrefix> indexEntryClusteringPrefixSerializer(Version version, SerializationHeader header)
-    {
-        if (!version.storeRows() || header ==  null) //null header indicates streaming from pre-3.0 sstables
-        {
-            return oldFormatSerializer(version);
-        }
-
-        return new NewFormatSerializer(version, header.clusteringTypes());
-    }
-
-    private ISerializer<ClusteringPrefix> oldFormatSerializer(Version version)
-    {
-        return new ISerializer<ClusteringPrefix>()
-        {
-            List<AbstractType<?>> clusteringTypes = SerializationHeader.makeWithoutStats(metadata).clusteringTypes();
-
-            public void serialize(ClusteringPrefix clustering, DataOutputPlus out) throws IOException
-            {
-                //we deserialize in the old format and serialize in the new format
-                ClusteringPrefix.serializer.serialize(clustering, out,
-                                                      version.correspondingMessagingVersion(),
-                                                      clusteringTypes);
-            }
-
-            @Override
-            public void skip(DataInputPlus in) throws IOException
-            {
-                ByteBufferUtil.skipShortLength(in);
-            }
-
-            public ClusteringPrefix deserialize(DataInputPlus in) throws IOException
-            {
-                // We're reading the old cellname/composite
-                ByteBuffer bb = ByteBufferUtil.readWithShortLength(in);
-                assert bb.hasRemaining(); // empty cellnames were invalid
-
-                int clusteringSize = metadata.clusteringColumns().size();
-                // If the table has no clustering column, then the cellname will just be the "column" name, which we ignore here.
-                if (clusteringSize == 0)
-                    return Clustering.EMPTY;
-
-                if (!metadata.isCompound())
-                    return Clustering.make(bb);
-
-                List<ByteBuffer> components = CompositeType.splitName(bb);
-                byte eoc = CompositeType.lastEOC(bb);
-
-                if (eoc == 0 || components.size() >= clusteringSize)
-                {
-                    // That's a clustering.
-                    if (components.size() > clusteringSize)
-                        components = components.subList(0, clusteringSize);
-
-                    return Clustering.make(components.toArray(new ByteBuffer[clusteringSize]));
-                }
-                else
-                {
-                    // It's a range tombstone bound. It is a start since that's the only part we've ever included
-                    // in the index entries.
-                    ClusteringPrefix.Kind boundKind = eoc > 0
-                                                 ? ClusteringPrefix.Kind.EXCL_START_BOUND
-                                                 : ClusteringPrefix.Kind.INCL_START_BOUND;
-
-                    return ClusteringBound.create(boundKind, components.toArray(new ByteBuffer[components.size()]));
-                }
-            }
-
-            public long serializedSize(ClusteringPrefix clustering)
-            {
-                return ClusteringPrefix.serializer.serializedSize(clustering, version.correspondingMessagingVersion(),
-                                                                  clusteringTypes);
-            }
-        };
-    }
-
-    private static class NewFormatSerializer implements ISerializer<ClusteringPrefix>
-    {
-        private final Version version;
-        private final List<AbstractType<?>> clusteringTypes;
-
-        NewFormatSerializer(Version version, List<AbstractType<?>> clusteringTypes)
-        {
-            this.version = version;
-            this.clusteringTypes = clusteringTypes;
-        }
-
-        public void serialize(ClusteringPrefix clustering, DataOutputPlus out) throws IOException
-        {
-            ClusteringPrefix.serializer.serialize(clustering, out, version.correspondingMessagingVersion(), clusteringTypes);
-        }
-
-        @Override
-        public void skip(DataInputPlus in) throws IOException
-        {
-            ClusteringPrefix.serializer.skip(in, version.correspondingMessagingVersion(), clusteringTypes);
-        }
-
-        public ClusteringPrefix deserialize(DataInputPlus in) throws IOException
-        {
-            return ClusteringPrefix.serializer.deserialize(in, version.correspondingMessagingVersion(), clusteringTypes);
-        }
-
-        public long serializedSize(ClusteringPrefix clustering)
-        {
-            return ClusteringPrefix.serializer.serializedSize(clustering, version.correspondingMessagingVersion(), clusteringTypes);
-        }
-    }
-}
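
The deleted oldFormatSerializer above decoded legacy composite cellnames by inspecting the end-of-component (EOC) byte. A minimal sketch (standalone, mirroring the logic in the removed deserialize method) of that classification rule:

    class LegacyEocRule
    {
        enum Kind { CLUSTERING, EXCL_START_BOUND, INCL_START_BOUND }

        // A zero EOC or a full set of components is a plain clustering; otherwise
        // it is a range tombstone start bound, exclusive when the EOC is positive.
        static Kind classify(byte eoc, int componentCount, int clusteringSize)
        {
            if (eoc == 0 || componentCount >= clusteringSize)
                return Kind.CLUSTERING;
            return eoc > 0 ? Kind.EXCL_START_BOUND : Kind.INCL_START_BOUND;
        }
    }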

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java b/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java
index d87d277..1d6ec7b 100644
--- a/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java
+++ b/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java
@@ -935,9 +935,9 @@ public class SinglePartitionReadCommand extends ReadCommand
                              nowInSec());
     }
 
-    public MessageOut<ReadCommand> createMessage(int version)
+    public MessageOut<ReadCommand> createMessage()
     {
-        return new MessageOut<>(MessagingService.Verb.READ, this, readSerializer);
+        return new MessageOut<>(MessagingService.Verb.READ, this, serializer);
     }
 
     protected void appendCQLWhereClause(StringBuilder sb)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/SystemKeyspace.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/SystemKeyspace.java b/src/java/org/apache/cassandra/db/SystemKeyspace.java
index 31a461b..91adf3a 100644
--- a/src/java/org/apache/cassandra/db/SystemKeyspace.java
+++ b/src/java/org/apache/cassandra/db/SystemKeyspace.java
@@ -102,16 +102,6 @@ public final class SystemKeyspace
     public static final String BUILT_VIEWS = "built_views";
     public static final String PREPARED_STATEMENTS = "prepared_statements";
 
-    @Deprecated public static final String LEGACY_HINTS = "hints";
-    @Deprecated public static final String LEGACY_BATCHLOG = "batchlog";
-    @Deprecated public static final String LEGACY_KEYSPACES = "schema_keyspaces";
-    @Deprecated public static final String LEGACY_COLUMNFAMILIES = "schema_columnfamilies";
-    @Deprecated public static final String LEGACY_COLUMNS = "schema_columns";
-    @Deprecated public static final String LEGACY_TRIGGERS = "schema_triggers";
-    @Deprecated public static final String LEGACY_USERTYPES = "schema_usertypes";
-    @Deprecated public static final String LEGACY_FUNCTIONS = "schema_functions";
-    @Deprecated public static final String LEGACY_AGGREGATES = "schema_aggregates";
-
     public static final CFMetaData Batches =
         compile(BATCHES,
                 "batches awaiting replay",
@@ -288,148 +278,6 @@ public final class SystemKeyspace
                 + "query_string text,"
                 + "PRIMARY KEY ((prepared_id)))");
 
-    @Deprecated
-    public static final CFMetaData LegacyHints =
-        compile(LEGACY_HINTS,
-                "*DEPRECATED* hints awaiting delivery",
-                "CREATE TABLE %s ("
-                + "target_id uuid,"
-                + "hint_id timeuuid,"
-                + "message_version int,"
-                + "mutation blob,"
-                + "PRIMARY KEY ((target_id), hint_id, message_version)) "
-                + "WITH COMPACT STORAGE")
-                .compaction(CompactionParams.scts(singletonMap("enabled", "false")))
-                .gcGraceSeconds(0);
-
-    @Deprecated
-    public static final CFMetaData LegacyBatchlog =
-        compile(LEGACY_BATCHLOG,
-                "*DEPRECATED* batchlog entries",
-                "CREATE TABLE %s ("
-                + "id uuid,"
-                + "data blob,"
-                + "version int,"
-                + "written_at timestamp,"
-                + "PRIMARY KEY ((id)))")
-                .compaction(CompactionParams.scts(singletonMap("min_threshold", "2")))
-                .gcGraceSeconds(0);
-
-    @Deprecated
-    public static final CFMetaData LegacyKeyspaces =
-        compile(LEGACY_KEYSPACES,
-                "*DEPRECATED* keyspace definitions",
-                "CREATE TABLE %s ("
-                + "keyspace_name text,"
-                + "durable_writes boolean,"
-                + "strategy_class text,"
-                + "strategy_options text,"
-                + "PRIMARY KEY ((keyspace_name))) "
-                + "WITH COMPACT STORAGE");
-
-    @Deprecated
-    public static final CFMetaData LegacyColumnfamilies =
-        compile(LEGACY_COLUMNFAMILIES,
-                "*DEPRECATED* table definitions",
-                "CREATE TABLE %s ("
-                + "keyspace_name text,"
-                + "columnfamily_name text,"
-                + "bloom_filter_fp_chance double,"
-                + "caching text,"
-                + "cf_id uuid," // post-2.1 UUID cfid
-                + "comment text,"
-                + "compaction_strategy_class text,"
-                + "compaction_strategy_options text,"
-                + "comparator text,"
-                + "compression_parameters text,"
-                + "default_time_to_live int,"
-                + "default_validator text,"
-                + "dropped_columns map<text, bigint>,"
-                + "gc_grace_seconds int,"
-                + "is_dense boolean,"
-                + "key_validator text,"
-                + "local_read_repair_chance double,"
-                + "max_compaction_threshold int,"
-                + "max_index_interval int,"
-                + "memtable_flush_period_in_ms int,"
-                + "min_compaction_threshold int,"
-                + "min_index_interval int,"
-                + "read_repair_chance double,"
-                + "speculative_retry text,"
-                + "subcomparator text,"
-                + "type text,"
-                + "PRIMARY KEY ((keyspace_name), columnfamily_name))");
-
-    @Deprecated
-    public static final CFMetaData LegacyColumns =
-        compile(LEGACY_COLUMNS,
-                "*DEPRECATED* column definitions",
-                "CREATE TABLE %s ("
-                + "keyspace_name text,"
-                + "columnfamily_name text,"
-                + "column_name text,"
-                + "component_index int,"
-                + "index_name text,"
-                + "index_options text,"
-                + "index_type text,"
-                + "type text,"
-                + "validator text,"
-                + "PRIMARY KEY ((keyspace_name), columnfamily_name, column_name))");
-
-    @Deprecated
-    public static final CFMetaData LegacyTriggers =
-        compile(LEGACY_TRIGGERS,
-                "*DEPRECATED* trigger definitions",
-                "CREATE TABLE %s ("
-                + "keyspace_name text,"
-                + "columnfamily_name text,"
-                + "trigger_name text,"
-                + "trigger_options map<text, text>,"
-                + "PRIMARY KEY ((keyspace_name), columnfamily_name, trigger_name))");
-
-    @Deprecated
-    public static final CFMetaData LegacyUsertypes =
-        compile(LEGACY_USERTYPES,
-                "*DEPRECATED* user defined type definitions",
-                "CREATE TABLE %s ("
-                + "keyspace_name text,"
-                + "type_name text,"
-                + "field_names list<text>,"
-                + "field_types list<text>,"
-                + "PRIMARY KEY ((keyspace_name), type_name))");
-
-    @Deprecated
-    public static final CFMetaData LegacyFunctions =
-        compile(LEGACY_FUNCTIONS,
-                "*DEPRECATED* user defined function definitions",
-                "CREATE TABLE %s ("
-                + "keyspace_name text,"
-                + "function_name text,"
-                + "signature frozen<list<text>>,"
-                + "argument_names list<text>,"
-                + "argument_types list<text>,"
-                + "body text,"
-                + "language text,"
-                + "return_type text,"
-                + "called_on_null_input boolean,"
-                + "PRIMARY KEY ((keyspace_name), function_name, signature))");
-
-    @Deprecated
-    public static final CFMetaData LegacyAggregates =
-        compile(LEGACY_AGGREGATES,
-                "*DEPRECATED* user defined aggregate definitions",
-                "CREATE TABLE %s ("
-                + "keyspace_name text,"
-                + "aggregate_name text,"
-                + "signature frozen<list<text>>,"
-                + "argument_types list<text>,"
-                + "final_func text,"
-                + "initcond blob,"
-                + "return_type text,"
-                + "state_func text,"
-                + "state_type text,"
-                + "PRIMARY KEY ((keyspace_name), aggregate_name, signature))");
-
     private static CFMetaData compile(String name, String description, String schema)
     {
         return CFMetaData.compile(String.format(schema, name), SchemaConstants.SYSTEM_KEYSPACE_NAME)
@@ -457,16 +305,7 @@ public final class SystemKeyspace
                          TransferredRanges,
                          ViewsBuildsInProgress,
                          BuiltViews,
-                         LegacyHints,
-                         LegacyBatchlog,
-                         PreparedStatements,
-                         LegacyKeyspaces,
-                         LegacyColumnfamilies,
-                         LegacyColumns,
-                         LegacyTriggers,
-                         LegacyUsertypes,
-                         LegacyFunctions,
-                         LegacyAggregates);
+                         PreparedStatements);
     }
 
     private static Functions functions()
@@ -1131,18 +970,27 @@ public final class SystemKeyspace
         if (results.isEmpty())
             return new PaxosState(key, metadata);
         UntypedResultSet.Row row = results.one();
+
+        // Note: Pre-3.0, we used to not store the versions at which things were serialized. As 3.0 is a mandatory
+        // upgrade to 4.0+ and the paxos table is TTLed, it's _very_ unlikely we'll ever read a proposal or MRC without
+        // a version. But if we do (say gc_grace, on which the TTL is based, happens to be super large), we consider
+        // the commit too old and ignore it.
+        if (!row.has("proposal_version") || !row.has("most_recent_commit_version"))
+            return new PaxosState(key, metadata);
+
         Commit promised = row.has("in_progress_ballot")
                         ? new Commit(row.getUUID("in_progress_ballot"), new PartitionUpdate(metadata, key, metadata.partitionColumns(), 1))
                         : Commit.emptyCommit(key, metadata);
         // either we have both a recently accepted ballot and update or we have neither
-        int proposalVersion = row.has("proposal_version") ? row.getInt("proposal_version") : MessagingService.VERSION_21;
-        Commit accepted = row.has("proposal")
-                        ? new Commit(row.getUUID("proposal_ballot"), PartitionUpdate.fromBytes(row.getBytes("proposal"), proposalVersion, key))
+        Commit accepted = row.has("proposal_version") && row.has("proposal")
+                        ? new Commit(row.getUUID("proposal_ballot"),
+                                     PartitionUpdate.fromBytes(row.getBytes("proposal"), row.getInt("proposal_version")))
                         : Commit.emptyCommit(key, metadata);
         // either most_recent_commit and most_recent_commit_at will both be set, or neither
-        int mostRecentVersion = row.has("most_recent_commit_version") ? row.getInt("most_recent_commit_version") : MessagingService.VERSION_21;
-        Commit mostRecent = row.has("most_recent_commit")
-                          ? new Commit(row.getUUID("most_recent_commit_at"), PartitionUpdate.fromBytes(row.getBytes("most_recent_commit"), mostRecentVersion, key))
+        Commit mostRecent = row.has("most_recent_commit_version") && row.has("most_recent_commit")
+                          ? new Commit(row.getUUID("most_recent_commit_at"),
+                                       PartitionUpdate.fromBytes(row.getBytes("most_recent_commit"), row.getInt("most_recent_commit_version")))
                           : Commit.emptyCommit(key, metadata);
         return new PaxosState(promised, accepted, mostRecent);
     }
@@ -1404,45 +1252,17 @@ public final class SystemKeyspace
         return result.one().getString("release_version");
     }
 
-    /**
-     * Check data directories for old files that can be removed when migrating from 2.1 or 2.2 to 3.0,
-     * these checks can be removed in 4.0, see CASSANDRA-7066
-     */
-    public static void migrateDataDirs()
-    {
-        Iterable<String> dirs = Arrays.asList(DatabaseDescriptor.getAllDataFileLocations());
-        for (String dataDir : dirs)
-        {
-            logger.trace("Checking {} for old files", dataDir);
-            File dir = new File(dataDir);
-            assert dir.exists() : dir + " should have been created by startup checks";
-
-            for (File ksdir : dir.listFiles((d, n) -> new File(d, n).isDirectory()))
-            {
-                logger.trace("Checking {} for old files", ksdir);
-
-                for (File cfdir : ksdir.listFiles((d, n) -> new File(d, n).isDirectory()))
-                {
-                    logger.trace("Checking {} for old files", cfdir);
-
-                    if (Descriptor.isLegacyFile(cfdir))
-                    {
-                        FileUtils.deleteRecursive(cfdir);
-                    }
-                    else
-                    {
-                        FileUtils.delete(cfdir.listFiles((d, n) -> Descriptor.isLegacyFile(new File(d, n))));
-                    }
-                }
-            }
-        }
-    }
-
     private static ByteBuffer rangeToBytes(Range<Token> range)
     {
         try (DataOutputBuffer out = new DataOutputBuffer())
         {
-            Range.tokenSerializer.serialize(range, out, MessagingService.VERSION_22);
+            // The format with which token ranges are serialized in the system tables is the pre-3.0 serialization
+            // format for ranges, so we should maintain that for now. And while we don't really support pre-3.0
+            // messaging versions, we know AbstractBounds.Serializer still supports it _exactly_ for this use case, so we
+            // pass 0 as the version to trigger that legacy code.
+            // In the future, it might be worth switching to a stable text format for the ranges to 1) avoid that legacy
+            // path and 2) be more user friendly (the serialization format we currently use is pretty custom).
+            Range.tokenSerializer.serialize(range, out, 0);
             return out.buffer();
         }
         catch (IOException e)
@@ -1456,9 +1276,10 @@ public final class SystemKeyspace
     {
         try
         {
+            // See rangeToBytes above for why version is 0.
             return (Range<Token>) Range.tokenSerializer.deserialize(ByteStreams.newDataInput(ByteBufferUtil.getArray(rawRange)),
                                                                     partitioner,
-                                                                    MessagingService.VERSION_22);
+                                                                    0);
         }
         catch (IOException e)
         {
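
The comment added in rangeToBytes pins the persisted encoding to a fixed version instead of current_version. A minimal sketch (illustrative, not Cassandra's serializer) of that decoupling, where the on-disk format constant never follows protocol upgrades:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    class PinnedFormatWriter
    {
        // Fixed forever for this table; protocol bumps must not change stored bytes.
        static final int STORED_FORMAT_VERSION = 0;

        static byte[] writeRange(long left, long right) throws IOException
        {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (DataOutputStream out = new DataOutputStream(bytes))
            {
                // A real serializer would branch on STORED_FORMAT_VERSION, exactly
                // like tokenSerializer.serialize(range, out, 0) in the diff above.
                out.writeLong(left);
                out.writeLong(right);
            }
            return bytes.toByteArray();
        }
    }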

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/UnfilteredDeserializer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/UnfilteredDeserializer.java b/src/java/org/apache/cassandra/db/UnfilteredDeserializer.java
index 9e39105..c32a642 100644
--- a/src/java/org/apache/cassandra/db/UnfilteredDeserializer.java
+++ b/src/java/org/apache/cassandra/db/UnfilteredDeserializer.java
@@ -39,7 +39,7 @@ import org.apache.cassandra.net.MessagingService;
  * we don't do more work than necessary (i.e. we don't allocate/deserialize
  * objects for things we don't care about).
  */
-public abstract class UnfilteredDeserializer
+public class UnfilteredDeserializer
 {
     private static final Logger logger = LoggerFactory.getLogger(UnfilteredDeserializer.class);
 
@@ -47,32 +47,67 @@ public abstract class UnfilteredDeserializer
     protected final DataInputPlus in;
     protected final SerializationHelper helper;
 
-    protected UnfilteredDeserializer(CFMetaData metadata,
-                                     DataInputPlus in,
-                                     SerializationHelper helper)
+    private final ClusteringPrefix.Deserializer clusteringDeserializer;
+    private final SerializationHeader header;
+
+    private int nextFlags;
+    private int nextExtendedFlags;
+    private boolean isReady;
+    private boolean isDone;
+
+    private final Row.Builder builder;
+
+    private UnfilteredDeserializer(CFMetaData metadata,
+                                   DataInputPlus in,
+                                   SerializationHeader header,
+                                   SerializationHelper helper)
     {
         this.metadata = metadata;
         this.in = in;
         this.helper = helper;
+        this.header = header;
+        this.clusteringDeserializer = new ClusteringPrefix.Deserializer(metadata.comparator, in, header);
+        this.builder = BTreeRow.sortedBuilder();
     }
 
     public static UnfilteredDeserializer create(CFMetaData metadata,
                                                 DataInputPlus in,
                                                 SerializationHeader header,
-                                                SerializationHelper helper,
-                                                DeletionTime partitionDeletion,
-                                                boolean readAllAsDynamic)
+                                                SerializationHelper helper)
     {
-        if (helper.version >= MessagingService.VERSION_30)
-            return new CurrentDeserializer(metadata, in, header, helper);
-        else
-            return new OldFormatDeserializer(metadata, in, helper, partitionDeletion, readAllAsDynamic);
+        return new UnfilteredDeserializer(metadata, in, header, helper);
     }
 
     /**
      * Whether or not there is more atom to read.
      */
-    public abstract boolean hasNext() throws IOException;
+    public boolean hasNext() throws IOException
+    {
+        if (isReady)
+            return true;
+
+        prepareNext();
+        return !isDone;
+    }
+
+    private void prepareNext() throws IOException
+    {
+        if (isDone)
+            return;
+
+        nextFlags = in.readUnsignedByte();
+        if (UnfilteredSerializer.isEndOfPartition(nextFlags))
+        {
+            isDone = true;
+            isReady = false;
+            return;
+        }
+
+        nextExtendedFlags = UnfilteredSerializer.readExtendedFlags(in, nextFlags);
+
+        clusteringDeserializer.prepare(nextFlags, nextExtendedFlags);
+        isReady = true;
+    }
 
     /**
      * Compare the provided bound to the next atom to read on disk.
@@ -81,585 +116,68 @@ public abstract class UnfilteredDeserializer
      * comparison. Whenever we know what to do with this atom (read it or skip it),
      * readNext or skipNext should be called.
      */
-    public abstract int compareNextTo(ClusteringBound bound) throws IOException;
-
-    /**
-     * Returns whether the next atom is a row or not.
-     */
-    public abstract boolean nextIsRow() throws IOException;
-
-    /**
-     * Returns whether the next atom is the static row or not.
-     */
-    public abstract boolean nextIsStatic() throws IOException;
+    public int compareNextTo(ClusteringBound bound) throws IOException
+    {
+        if (!isReady)
+            prepareNext();
 
-    /**
-     * Returns the next atom.
-     */
-    public abstract Unfiltered readNext() throws IOException;
+        assert !isDone;
 
-    /**
-     * Clears any state in this deserializer.
-     */
-    public abstract void clearState() throws IOException;
+        return clusteringDeserializer.compareNextTo(bound);
+    }
 
     /**
-     * Skips the next atom.
+     * Returns whether the next atom is a row or not.
      */
-    public abstract void skipNext() throws IOException;
+    public boolean nextIsRow() throws IOException
+    {
+        if (!isReady)
+            prepareNext();
 
+        return UnfilteredSerializer.kind(nextFlags) == Unfiltered.Kind.ROW;
+    }
 
     /**
-     * For the legacy layout deserializer, we have to deal with the fact that a row can span multiple index blocks and that
-     * the call to hasNext() reads the next element upfront. We must take that into account when we check in AbstractSSTableIterator if
-     * we're past the end of an index block boundary, as that check expects to account only for consumed data (that is, if hasNext has
-     * been called and made us cross an index boundary but neither readNext() nor skipNext() has yet been called, we shouldn't consider
-     * the index block boundary crossed yet).
-     *
-     * TODO: we don't care about this for the current file format because a row can never span multiple index blocks (further, hasNext()
-     * basically reads only 2 bytes from disk in that case). So once we drop backward compatibility with pre-3.0 sstables, we should
-     * remove this.
+     * Returns the next atom.
      */
-    public abstract long bytesReadForUnconsumedData();
-
-    private static class CurrentDeserializer extends UnfilteredDeserializer
+    public Unfiltered readNext() throws IOException
     {
-        private final ClusteringPrefix.Deserializer clusteringDeserializer;
-        private final SerializationHeader header;
-
-        private int nextFlags;
-        private int nextExtendedFlags;
-        private boolean isReady;
-        private boolean isDone;
-
-        private final Row.Builder builder;
-
-        private CurrentDeserializer(CFMetaData metadata,
-                                    DataInputPlus in,
-                                    SerializationHeader header,
-                                    SerializationHelper helper)
+        isReady = false;
+        if (UnfilteredSerializer.kind(nextFlags) == Unfiltered.Kind.RANGE_TOMBSTONE_MARKER)
         {
-            super(metadata, in, helper);
-            this.header = header;
-            this.clusteringDeserializer = new ClusteringPrefix.Deserializer(metadata.comparator, in, header);
-            this.builder = BTreeRow.sortedBuilder();
+            ClusteringBoundOrBoundary bound = clusteringDeserializer.deserializeNextBound();
+            return UnfilteredSerializer.serializer.deserializeMarkerBody(in, header, bound);
         }
-
-        public boolean hasNext() throws IOException
-        {
-            if (isReady)
-                return true;
-
-            prepareNext();
-            return !isDone;
-        }
-
-        private void prepareNext() throws IOException
-        {
-            if (isDone)
-                return;
-
-            nextFlags = in.readUnsignedByte();
-            if (UnfilteredSerializer.isEndOfPartition(nextFlags))
-            {
-                isDone = true;
-                isReady = false;
-                return;
-            }
-
-            nextExtendedFlags = UnfilteredSerializer.readExtendedFlags(in, nextFlags);
-
-            clusteringDeserializer.prepare(nextFlags, nextExtendedFlags);
-            isReady = true;
-        }
-
-        public int compareNextTo(ClusteringBound bound) throws IOException
-        {
-            if (!isReady)
-                prepareNext();
-
-            assert !isDone;
-
-            return clusteringDeserializer.compareNextTo(bound);
-        }
-
-        public boolean nextIsRow() throws IOException
-        {
-            if (!isReady)
-                prepareNext();
-
-            return UnfilteredSerializer.kind(nextFlags) == Unfiltered.Kind.ROW;
-        }
-
-        public boolean nextIsStatic() throws IOException
-        {
-            // This exists only for the sake of the OldFormatDeserializer
-            throw new UnsupportedOperationException();
-        }
-
-        public Unfiltered readNext() throws IOException
-        {
-            isReady = false;
-            if (UnfilteredSerializer.kind(nextFlags) == Unfiltered.Kind.RANGE_TOMBSTONE_MARKER)
-            {
-                ClusteringBoundOrBoundary bound = clusteringDeserializer.deserializeNextBound();
-                return UnfilteredSerializer.serializer.deserializeMarkerBody(in, header, bound);
-            }
-            else
-            {
-                builder.newRow(clusteringDeserializer.deserializeNextClustering());
-                return UnfilteredSerializer.serializer.deserializeRowBody(in, header, helper, nextFlags, nextExtendedFlags, builder);
-            }
-        }
-
-        public void skipNext() throws IOException
-        {
-            isReady = false;
-            clusteringDeserializer.skipNext();
-            if (UnfilteredSerializer.kind(nextFlags) == Unfiltered.Kind.RANGE_TOMBSTONE_MARKER)
-            {
-                UnfilteredSerializer.serializer.skipMarkerBody(in);
-            }
-            else
-            {
-                UnfilteredSerializer.serializer.skipRowBody(in);
-            }
-        }
-
-        public void clearState()
-        {
-            isReady = false;
-            isDone = false;
-        }
-
-        public long bytesReadForUnconsumedData()
+        else
         {
-            // In theory, hasNext() does consume 2-3 bytes, but we don't care about this for the current file format, so we return
-            // 0 to mean "do nothing".
-            return 0;
+            builder.newRow(clusteringDeserializer.deserializeNextClustering());
+            return UnfilteredSerializer.serializer.deserializeRowBody(in, header, helper, nextFlags, nextExtendedFlags, builder);
         }
     }
 
-    public static class OldFormatDeserializer extends UnfilteredDeserializer
+    /**
+     * Clears any state in this deserializer.
+     */
+    public void clearState()
     {
-        private final boolean readAllAsDynamic;
-        private boolean skipStatic;
-
-        // The next Unfiltered to return, computed by hasNext()
-        private Unfiltered next;
-        // A temporary storage for an unfiltered that isn't returned next but should be looked at just afterwards
-        private Unfiltered saved;
-
-        private boolean isFirst = true;
-
-        // The Unfiltered as read from the old format input
-        private final UnfilteredIterator iterator;
-
-        // The position in the input after the last data consumption (readNext/skipNext).
-        private long lastConsumedPosition;
-
-        private OldFormatDeserializer(CFMetaData metadata,
-                                      DataInputPlus in,
-                                      SerializationHelper helper,
-                                      DeletionTime partitionDeletion,
-                                      boolean readAllAsDynamic)
-        {
-            super(metadata, in, helper);
-            this.iterator = new UnfilteredIterator(partitionDeletion);
-            this.readAllAsDynamic = readAllAsDynamic;
-            this.lastConsumedPosition = currentPosition();
-        }
-
-        public void setSkipStatic()
-        {
-            this.skipStatic = true;
-        }
-
-        private boolean isStatic(Unfiltered unfiltered)
-        {
-            return unfiltered.isRow() && ((Row)unfiltered).isStatic();
-        }
-
-        public boolean hasNext() throws IOException
-        {
-            try
-            {
-                while (next == null)
-                {
-                    if (saved == null && !iterator.hasNext())
-                        return false;
-
-                    next = saved == null ? iterator.next() : saved;
-                    saved = null;
-
-                    // The sstable iterators assume that if there is one, the static row is the first thing this deserializer will return.
-                    // However, in the old format, a range tombstone with an empty start would sort before any static cell. So we should
-                    // detect that case and return the static parts first if necessary.
-                    if (isFirst && iterator.hasNext() && isStatic(iterator.peek()))
-                    {
-                        saved = next;
-                        next = iterator.next();
-                    }
-                    isFirst = false;
-
-                    // When reading old tables, we sometimes want to skip static data (due to how statically defined columns of compact
-                    // tables are handled).
-                    if (skipStatic && isStatic(next))
-                        next = null;
-                }
-                return true;
-            }
-            catch (IOError e)
-            {
-                if (e.getCause() != null && e.getCause() instanceof IOException)
-                    throw (IOException)e.getCause();
-                throw e;
-            }
-        }
-
-        private boolean isRow(LegacyLayout.LegacyAtom atom)
-        {
-            if (atom.isCell())
-                return true;
-
-            LegacyLayout.LegacyRangeTombstone tombstone = atom.asRangeTombstone();
-            return tombstone.isCollectionTombstone() || tombstone.isRowDeletion(metadata);
-        }
-
-        public int compareNextTo(ClusteringBound bound) throws IOException
-        {
-            if (!hasNext())
-                throw new IllegalStateException();
-            return metadata.comparator.compare(next.clustering(), bound);
-        }
-
-        public boolean nextIsRow() throws IOException
-        {
-            if (!hasNext())
-                throw new IllegalStateException();
-            return next.isRow();
-        }
-
-        public boolean nextIsStatic() throws IOException
-        {
-            return nextIsRow() && ((Row)next).isStatic();
-        }
-
-        private long currentPosition()
-        {
-            // We return a bogus value if the input is not file based, but we check that we never
-            // rely on that value in that case in bytesReadForUnconsumedData
-            return in instanceof FileDataInput ? ((FileDataInput)in).getFilePointer() : 0;
-        }
-
-        public Unfiltered readNext() throws IOException
-        {
-            if (!hasNext())
-                throw new IllegalStateException();
-            Unfiltered toReturn = next;
-            next = null;
-            lastConsumedPosition = currentPosition();
-            return toReturn;
-        }
-
-        public void skipNext() throws IOException
-        {
-            if (!hasNext())
-                throw new UnsupportedOperationException();
-            next = null;
-            lastConsumedPosition = currentPosition();
-        }
-
-        public long bytesReadForUnconsumedData()
-        {
-            if (!(in instanceof FileDataInput))
-                throw new AssertionError();
-
-            return currentPosition() - lastConsumedPosition;
-        }
-
-        public void clearState()
-        {
-            next = null;
-            saved = null;
-            iterator.clearState();
-            lastConsumedPosition = currentPosition();
-        }
+        isReady = false;
+        isDone = false;
+    }
 
-        // Groups atoms from the input into proper Unfiltered.
-        // Note: this could use guava AbstractIterator except that we want to be able to clear
-        // the internal state of the iterator so it's cleaner to do it ourselves.
-        private class UnfilteredIterator implements PeekingIterator<Unfiltered>
+    /**
+     * Skips the next atom.
+     */
+    public void skipNext() throws IOException
+    {
+        isReady = false;
+        clusteringDeserializer.skipNext();
+        if (UnfilteredSerializer.kind(nextFlags) == Unfiltered.Kind.RANGE_TOMBSTONE_MARKER)
         {
-            private final AtomIterator atoms;
-            private final LegacyLayout.CellGrouper grouper;
-            private final TombstoneTracker tombstoneTracker;
-
-            private Unfiltered next;
-
-            private UnfilteredIterator(DeletionTime partitionDeletion)
-            {
-                this.grouper = new LegacyLayout.CellGrouper(metadata, helper);
-                this.tombstoneTracker = new TombstoneTracker(partitionDeletion);
-                this.atoms = new AtomIterator();
-            }
-
-            public boolean hasNext()
-            {
-                // Note that we loop on next == null because TombstoneTracker.openNew() could return null below or the atom might be shadowed.
-                while (next == null)
-                {
-                    if (atoms.hasNext())
-                    {
-                        // If a range tombstone closes strictly before the next row/RT, we need to return that close (or boundary) marker first.
-                        if (tombstoneTracker.hasClosingMarkerBefore(atoms.peek()))
-                        {
-                            next = tombstoneTracker.popClosingMarker();
-                        }
-                        else
-                        {
-                            LegacyLayout.LegacyAtom atom = atoms.next();
-                            if (!tombstoneTracker.isShadowed(atom))
-                                next = isRow(atom) ? readRow(atom) : tombstoneTracker.openNew(atom.asRangeTombstone());
-                        }
-                    }
-                    else if (tombstoneTracker.hasOpenTombstones())
-                    {
-                        next = tombstoneTracker.popClosingMarker();
-                    }
-                    else
-                    {
-                        return false;
-                    }
-                }
-                return true;
-            }
-
-            private Unfiltered readRow(LegacyLayout.LegacyAtom first)
-            {
-                LegacyLayout.CellGrouper grouper = first.isStatic()
-                                                 ? LegacyLayout.CellGrouper.staticGrouper(metadata, helper)
-                                                 : this.grouper;
-                grouper.reset();
-                grouper.addAtom(first);
-                // As long as atoms are part of the same row, consume them. Note that the call to addAtom() uses
-                // atoms.peek() so that the atom is only consumed (by next) if it's part of the row (addAtom returns true)
-                while (atoms.hasNext() && grouper.addAtom(atoms.peek()))
-                {
-                    atoms.next();
-                }
-                return grouper.getRow();
-            }
-
-            public Unfiltered next()
-            {
-                if (!hasNext())
-                    throw new UnsupportedOperationException();
-                Unfiltered toReturn = next;
-                next = null;
-                return toReturn;
-            }
-
-            public Unfiltered peek()
-            {
-                if (!hasNext())
-                    throw new UnsupportedOperationException();
-                return next;
-            }
-
-            public void clearState()
-            {
-                atoms.clearState();
-                tombstoneTracker.clearState();
-                next = null;
-            }
-
-            public void remove()
-            {
-                throw new UnsupportedOperationException();
-            }
+            UnfilteredSerializer.serializer.skipMarkerBody(in);
         }
-
-        // Wraps the input of the deserializer to provide an iterator (and skip shadowed atoms).
-        // Note: this could use guava AbstractIterator except that we want to be able to clear
-        // the internal state of the iterator so it's cleaner to do it ourselves.
-        private class AtomIterator implements PeekingIterator<LegacyLayout.LegacyAtom>
-        {
-            private boolean isDone;
-            private LegacyLayout.LegacyAtom next;
-
-            private AtomIterator()
-            {
-            }
-
-            public boolean hasNext()
-            {
-                if (isDone)
-                    return false;
-
-                if (next == null)
-                {
-                    next = readAtom();
-                    if (next == null)
-                    {
-                        isDone = true;
-                        return false;
-                    }
-                }
-                return true;
-            }
-
-            private LegacyLayout.LegacyAtom readAtom()
-            {
-                try
-                {
-                    return LegacyLayout.readLegacyAtom(metadata, in, readAllAsDynamic);
-                }
-                catch (IOException e)
-                {
-                    throw new IOError(e);
-                }
-            }
-
-            public LegacyLayout.LegacyAtom next()
-            {
-                if (!hasNext())
-                    throw new UnsupportedOperationException();
-                LegacyLayout.LegacyAtom toReturn = next;
-                next = null;
-                return toReturn;
-            }
-
-            public LegacyLayout.LegacyAtom peek()
-            {
-                if (!hasNext())
-                    throw new UnsupportedOperationException();
-                return next;
-            }
-
-            public void clearState()
-            {
-                this.next = null;
-                this.isDone = false;
-            }
-
-            public void remove()
-            {
-                throw new UnsupportedOperationException();
-            }
-        }
-
-        /**
-         * Tracks which range tombstones are open when deserializing the old format.
-         */
-        private class TombstoneTracker
+        else
         {
-            private final DeletionTime partitionDeletion;
-
-            // Open tombstones sorted by their closing bound (i.e. first tombstone is the first to close).
-            // As we only track non-fully-shadowed ranges, the first range is necessarily the currently
-            // open tombstone (the one with the higher timestamp).
-            private final SortedSet<LegacyLayout.LegacyRangeTombstone> openTombstones;
-
-            public TombstoneTracker(DeletionTime partitionDeletion)
-            {
-                this.partitionDeletion = partitionDeletion;
-                this.openTombstones = new TreeSet<>((rt1, rt2) -> metadata.comparator.compare(rt1.stop.bound, rt2.stop.bound));
-            }
-
-            /**
-             * Checks if the provided atom is fully shadowed by the open tombstones of this tracker (or the partition deletion).
-             */
-            public boolean isShadowed(LegacyLayout.LegacyAtom atom)
-            {
-                assert !hasClosingMarkerBefore(atom);
-                long timestamp = atom.isCell() ? atom.asCell().timestamp : atom.asRangeTombstone().deletionTime.markedForDeleteAt();
-
-                if (partitionDeletion.deletes(timestamp))
-                    return true;
-
-                SortedSet<LegacyLayout.LegacyRangeTombstone> coveringTombstones = isRow(atom) ? openTombstones : openTombstones.tailSet(atom.asRangeTombstone());
-                return Iterables.any(coveringTombstones, tombstone -> tombstone.deletionTime.deletes(timestamp));
-            }
-
-            /**
-             * Whether the currently open marker closes strictly before the provided row/RT.
-             */
-            public boolean hasClosingMarkerBefore(LegacyLayout.LegacyAtom atom)
-            {
-                return !openTombstones.isEmpty()
-                    && metadata.comparator.compare(openTombstones.first().stop.bound, atom.clustering()) < 0;
-            }
-
-            /**
-             * Returns the unfiltered corresponding to closing the currently open marker (and updates the tracker accordingly).
-             */
-            public Unfiltered popClosingMarker()
-            {
-                assert !openTombstones.isEmpty();
-
-                Iterator<LegacyLayout.LegacyRangeTombstone> iter = openTombstones.iterator();
-                LegacyLayout.LegacyRangeTombstone first = iter.next();
-                iter.remove();
-
-                // If that was the last open tombstone, we just want to close it. Otherwise, we have a boundary with the
-                // next tombstone
-                if (!iter.hasNext())
-                    return new RangeTombstoneBoundMarker(first.stop.bound, first.deletionTime);
-
-                LegacyLayout.LegacyRangeTombstone next = iter.next();
-                return RangeTombstoneBoundaryMarker.makeBoundary(false, first.stop.bound, first.stop.bound.invert(), first.deletionTime, next.deletionTime);
-            }
-
-            /**
-             * Updates the tracker given the provided newly opened tombstone. This returns the Unfiltered corresponding to the opening
-             * of said tombstone: this can be a simple open mark, a boundary (if there was an open tombstone superseded by this new one)
-             * or even null (if the new tombstone's start is superseded by the currently open tombstone).
-             *
-             * Note that this method assumes the added tombstone is not fully shadowed, i.e. that !isShadowed(tombstone). It also
-             * assumes no open tombstone closes before that tombstone (so !hasClosingMarkerBefore(tombstone)).
-             */
-            public Unfiltered openNew(LegacyLayout.LegacyRangeTombstone tombstone)
-            {
-                if (openTombstones.isEmpty())
-                {
-                    openTombstones.add(tombstone);
-                    return new RangeTombstoneBoundMarker(tombstone.start.bound, tombstone.deletionTime);
-                }
-
-                Iterator<LegacyLayout.LegacyRangeTombstone> iter = openTombstones.iterator();
-                LegacyLayout.LegacyRangeTombstone first = iter.next();
-                if (tombstone.deletionTime.supersedes(first.deletionTime))
-                {
-                    // We're superseding the currently open tombstone, so we should produce a boundary that closes the currently open
-                    // one and opens the new one. We should also add the new tombstone, but if it stops at or after the first one, we
-                    // should also remove that first tombstone as it won't be useful anymore.
-                    if (metadata.comparator.compare(tombstone.stop.bound, first.stop.bound) >= 0)
-                        iter.remove();
-
-                    openTombstones.add(tombstone);
-                    return RangeTombstoneBoundaryMarker.makeBoundary(false, tombstone.start.bound.invert(), tombstone.start.bound, first.deletionTime, tombstone.deletionTime);
-                }
-                else
-                {
-                    // If the new tombstone doesn't supersede the currently open tombstone, we don't have anything to return; we
-                    // just add the new tombstone (because we know the tombstone is not fully shadowed, this implies the new tombstone
-                    // simply extends past the first one, and we'll deal with it later)
-                    assert metadata.comparator.compare(tombstone.start.bound, first.stop.bound) > 0;
-                    openTombstones.add(tombstone);
-                    return null;
-                }
-            }
-
-            public boolean hasOpenTombstones()
-            {
-                return !openTombstones.isEmpty();
-            }
-
-            public void clearState()
-            {
-                openTombstones.clear();
-            }
+            UnfilteredSerializer.serializer.skipRowBody(in);
         }
     }
 }

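For context, the surviving class keeps the same two-phase contract the old CurrentDeserializer had: hasNext() only reads the flag byte(s) and prepares the clustering, while readNext()/skipNext() consume the atom body. A rough sketch of how the sstable iterators drive it (an illustration only, not code from this patch; metadata, in, header, helper and a slice-end ClusteringBound 'end' are assumed in scope, and process() is a hypothetical consumer):

    UnfilteredDeserializer deserializer = UnfilteredDeserializer.create(metadata, in, header, helper);
    while (deserializer.hasNext())                      // peeks: reads flags and prepares the clustering only
    {
        if (deserializer.compareNextTo(end) > 0)        // next atom sorts past the slice end
            break;

        if (deserializer.nextIsRow())
            process((Row) deserializer.readNext());     // materializes the row body
        else
            deserializer.skipNext();                    // skips the marker body without building it
    }
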
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/columniterator/AbstractSSTableIterator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/columniterator/AbstractSSTableIterator.java b/src/java/org/apache/cassandra/db/columniterator/AbstractSSTableIterator.java
index f7e614a..d435832 100644
--- a/src/java/org/apache/cassandra/db/columniterator/AbstractSSTableIterator.java
+++ b/src/java/org/apache/cassandra/db/columniterator/AbstractSSTableIterator.java
@@ -89,12 +89,6 @@ public abstract class AbstractSSTableIterator implements UnfilteredRowIterator
                 //   - we're querying static columns.
                 boolean needSeekAtPartitionStart = !indexEntry.isIndexed() || !columns.fetchedColumns().statics.isEmpty();
 
-                // For CQL queries on static compact tables, we only want to consider static values (only those are exposed),
-                // but readStaticRow has already read them and might in fact have consumed the whole partition (when reading
-                // the legacy file format), so we set the reader to null so we don't try to read anything more. We can remove this
-                // once we drop support for the legacy file format.
-                boolean needsReader = sstable.descriptor.version.storeRows() || isForThrift || !sstable.metadata.isStaticCompactTable();
-
                 if (needSeekAtPartitionStart)
                 {
                     // Not indexed (or is reading static), set to the beginning of the partition and read partition level deletion there
@@ -108,14 +102,14 @@ public abstract class AbstractSSTableIterator implements UnfilteredRowIterator
 
                     // Note that this needs to be called after file != null and after the partitionDeletion has been set, but before readStaticRow
                     // (since it uses it) so we can't move that up (but we'll be able to simplify as soon as we drop support for the old file format).
-                    this.reader = needsReader ? createReader(indexEntry, file, shouldCloseFile) : null;
-                    this.staticRow = readStaticRow(sstable, file, helper, columns.fetchedColumns().statics, isForThrift, reader == null ? null : reader.deserializer);
+                    this.reader = createReader(indexEntry, file, shouldCloseFile);
+                    this.staticRow = readStaticRow(sstable, file, helper, columns.fetchedColumns().statics, isForThrift, reader.deserializer);
                 }
                 else
                 {
                     this.partitionLevelDeletion = indexEntry.deletionTime();
                     this.staticRow = Rows.EMPTY_STATIC_ROW;
-                    this.reader = needsReader ? createReader(indexEntry, file, shouldCloseFile) : null;
+                    this.reader = createReader(indexEntry, file, shouldCloseFile);
                 }
 
                 if (reader != null && !slices.isEmpty())
@@ -168,33 +162,6 @@ public abstract class AbstractSSTableIterator implements UnfilteredRowIterator
                                      boolean isForThrift,
                                      UnfilteredDeserializer deserializer) throws IOException
     {
-        if (!sstable.descriptor.version.storeRows())
-        {
-            if (!sstable.metadata.isCompactTable())
-            {
-                assert deserializer != null;
-                return deserializer.hasNext() && deserializer.nextIsStatic()
-                     ? (Row)deserializer.readNext()
-                     : Rows.EMPTY_STATIC_ROW;
-            }
-
-            // For compact tables, we use statics for the "column_metadata" definition. However, in the old format, those
-            // "column_metadata" are intermingled like any other "cell". In theory, this means that we'd have to do a first
-            // pass to extract the static values. However, for thrift, we'll use the ThriftResultsMerger right away, which
-            // will re-merge static values with dynamic ones, so we can just ignore statics and read every cell as a
-            // "dynamic" one. For CQL, if the table is a "static compact", then it has only static columns exposed and no
-            // dynamic ones. So we do a pass to extract static columns here, but will have no more work to do. Otherwise,
-            // the table won't have static columns.
-            if (statics.isEmpty() || isForThrift)
-                return Rows.EMPTY_STATIC_ROW;
-
-            assert sstable.metadata.isStaticCompactTable();
-
-            // As said above, if it's a CQL query and the table is a "static compact", the only exposed columns are the
-            // static ones. So we don't have to mark the position to seek back later.
-            return LegacyLayout.extractStaticColumns(sstable.metadata, file, statics);
-        }
-
         if (!sstable.header.hasStatic())
             return Rows.EMPTY_STATIC_ROW;
 
@@ -345,7 +312,7 @@ public abstract class AbstractSSTableIterator implements UnfilteredRowIterator
         private void createDeserializer()
         {
             assert file != null && deserializer == null;
-            deserializer = UnfilteredDeserializer.create(sstable.metadata, file, sstable.header, helper, partitionLevelDeletion, isForThrift);
+            deserializer = UnfilteredDeserializer.create(sstable.metadata, file, sstable.header, helper);
         }
 
         protected void seekToPosition(long position) throws IOException
@@ -550,8 +517,7 @@ public abstract class AbstractSSTableIterator implements UnfilteredRowIterator
         public boolean isPastCurrentBlock() throws IOException
         {
             assert reader.deserializer != null;
-            long correction = reader.deserializer.bytesReadForUnconsumedData();
-            return reader.file.bytesPastMark(mark) - correction >= currentIndex().width;
+            return reader.file.bytesPastMark(mark) >= currentIndex().width;
         }
 
         public int currentBlockIdx()

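The bytesReadForUnconsumedData() correction dropped above existed only because legacy rows could straddle index blocks; in the current format hasNext() consumes at most the flag bytes and a row never spans blocks, so the boundary test reduces to the raw distance from the block-start mark. Schematically (names as in the diff):

    // 'mark' is set at the start of the current index block; 'width' is that
    // block's on-disk span. No unconsumed-bytes correction term is needed.
    boolean pastBlock = reader.file.bytesPastMark(mark) >= currentIndex().width;
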
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/columniterator/SSTableIterator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/columniterator/SSTableIterator.java b/src/java/org/apache/cassandra/db/columniterator/SSTableIterator.java
index b3c2e94..aa0a390 100644
--- a/src/java/org/apache/cassandra/db/columniterator/SSTableIterator.java
+++ b/src/java/org/apache/cassandra/db/columniterator/SSTableIterator.java
@@ -257,12 +257,10 @@ public class SSTableIterator extends AbstractSSTableIterator
             // so if currentIdx == lastBlockIdx and slice.end < indexes[currentIdx].firstName, we're guaranteed that the
             // whole slice is between the previous block end and this block start, and thus has no corresponding
             // data. One exception is if the previous block ends with an openMarker as it will cover our slice
-            // and we need to return it (we also don't skip the slice for the old format because we didn't have the openMarker
-            // info in that case and can't rely on this optimization).
+            // and we need to return it.
             if (indexState.currentBlockIdx() == lastBlockIdx
                 && metadata().comparator.compare(slice.end(), indexState.currentIndex().firstName) < 0
-                && openMarker == null
-                && sstable.descriptor.version.storeRows())
+                && openMarker == null)
             {
                 sliceDone = true;
             }

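Spelled out, the surviving short-circuit says a slice can be declared empty without reading any data when all three conditions hold (a paraphrase of the hunk above, not new logic):

    boolean sliceHasNoData = indexState.currentBlockIdx() == lastBlockIdx        // last block the slice could touch
                          && metadata().comparator.compare(slice.end(),
                                 indexState.currentIndex().firstName) < 0        // slice ends before this block starts
                          && openMarker == null;                                 // no tombstone spills over from the previous block
    if (sliceHasNoData)
        sliceDone = true;
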
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/columniterator/SSTableReversedIterator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/columniterator/SSTableReversedIterator.java b/src/java/org/apache/cassandra/db/columniterator/SSTableReversedIterator.java
index c74b5db..ca0cce2 100644
--- a/src/java/org/apache/cassandra/db/columniterator/SSTableReversedIterator.java
+++ b/src/java/org/apache/cassandra/db/columniterator/SSTableReversedIterator.java
@@ -310,23 +310,7 @@ public class SSTableReversedIterator extends AbstractSSTableIterator
             int currentBlock = indexState.currentBlockIdx();
 
             boolean canIncludeSliceStart = currentBlock == lastBlockIdx;
-
-            // When dealing with old format sstables, we have the problem that a row can span 2 index blocks, i.e. it can
-            // start at the end of a block and end at the beginning of the next one. That's not a problem per se for
-            // UnfilteredDeserializer.OldFormatDeserializer, since it always reads rows entirely, even if they span index
-            // blocks, but as we read index blocks in reverse we must be careful not to read the end of a row at the
-            // beginning of a block before we've read the beginning of that row. So what we do is that if we detect
-            // that the row starting this block is also the row ending the previous one, we skip that first result and
-            // let it be read when we read the previous block.
-            boolean includeFirst = true;
-            if (!sstable.descriptor.version.storeRows() && currentBlock > 0)
-            {
-                ClusteringPrefix lastOfPrevious = indexState.index(currentBlock - 1).lastName;
-                ClusteringPrefix firstOfCurrent = indexState.index(currentBlock).firstName;
-                includeFirst = metadata().comparator.compare(lastOfPrevious, firstOfCurrent) != 0;
-            }
-
-            loadFromDisk(canIncludeSliceStart ? slice.start() : null, canIncludeSliceEnd ? slice.end() : null, includeFirst);
+            loadFromDisk(canIncludeSliceStart ? slice.start() : null, canIncludeSliceEnd ? slice.end() : null, true);
         }
 
         @Override

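For the record, the guard deleted above could only ever fire for legacy sstables, where a row could straddle an index block boundary and would otherwise be surfaced twice when iterating blocks in reverse. Condensed, what it did was (legacy formats only, shown purely to document the removed behaviour):

    // If the first clustering of this block equals the last of the previous one,
    // the row straddles the boundary: defer it to the pass over the previous block.
    ClusteringPrefix lastOfPrevious = indexState.index(currentBlock - 1).lastName;
    ClusteringPrefix firstOfCurrent = indexState.index(currentBlock).firstName;
    boolean includeFirst = metadata().comparator.compare(lastOfPrevious, firstOfCurrent) != 0;
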
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java
index a30ca0e..c7c5971 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java
@@ -233,7 +233,7 @@ public class CommitLogArchiver
                     throw new IllegalStateException("Cannot safely construct descriptor for segment, either from its name or its header: " + fromFile.getPath());
                 else if (fromHeader != null && fromName != null && !fromHeader.equalsIgnoringCompression(fromName))
                     throw new IllegalStateException(String.format("Cannot safely construct descriptor for segment, as name and header descriptors do not match (%s vs %s): %s", fromHeader, fromName, fromFile.getPath()));
-                else if (fromName != null && fromHeader == null && fromName.version >= CommitLogDescriptor.VERSION_21)
+                else if (fromName != null && fromHeader == null)
                     throw new IllegalStateException("Cannot safely construct descriptor for segment, as name descriptor implies a version that should contain a header descriptor, but that descriptor could not be read: " + fromFile.getPath());
                 else if (fromHeader != null)
                     descriptor = fromHeader;

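With the VERSION_21 exemption gone, descriptor resolution collapses to "a readable header wins; everything else is fatal". Schematically (fromHeader is parsed from the segment's header, fromName from its filename, both nullable; exception messages abbreviated here):

    if (fromHeader == null && fromName == null)
        throw new IllegalStateException("no usable descriptor");          // nothing to go on at all
    else if (fromHeader != null && fromName != null && !fromHeader.equalsIgnoringCompression(fromName))
        throw new IllegalStateException("name and header disagree");      // mismatched descriptors
    else if (fromName != null && fromHeader == null)
        throw new IllegalStateException("header unreadable");             // a parseable name now always implies a readable header
    else
        descriptor = fromHeader;                                          // the only combination left standing
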
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/commitlog/CommitLogDescriptor.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogDescriptor.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogDescriptor.java
index 088d44a..0ab191d 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogDescriptor.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogDescriptor.java
@@ -57,10 +57,7 @@ public class CommitLogDescriptor
     static final String COMPRESSION_PARAMETERS_KEY = "compressionParameters";
     static final String COMPRESSION_CLASS_KEY = "compressionClass";
 
-    public static final int VERSION_12 = 2;
-    public static final int VERSION_20 = 3;
-    public static final int VERSION_21 = 4;
-    public static final int VERSION_22 = 5;
+    // We don't support anything pre-3.0
     public static final int VERSION_30 = 6;
 
     /**
@@ -104,20 +101,15 @@ public class CommitLogDescriptor
         out.putLong(descriptor.id);
         updateChecksumInt(crc, (int) (descriptor.id & 0xFFFFFFFFL));
         updateChecksumInt(crc, (int) (descriptor.id >>> 32));
-        if (descriptor.version >= VERSION_22)
-        {
-            String parametersString = constructParametersString(descriptor.compression, descriptor.encryptionContext, additionalHeaders);
-            byte[] parametersBytes = parametersString.getBytes(StandardCharsets.UTF_8);
-            if (parametersBytes.length != (((short) parametersBytes.length) & 0xFFFF))
-                throw new ConfigurationException(String.format("Compression parameters too long, length %d cannot be above 65535.",
-                                                               parametersBytes.length));
-            out.putShort((short) parametersBytes.length);
-            updateChecksumInt(crc, parametersBytes.length);
-            out.put(parametersBytes);
-            crc.update(parametersBytes, 0, parametersBytes.length);
-        }
-        else
-            assert descriptor.compression == null;
+        String parametersString = constructParametersString(descriptor.compression, descriptor.encryptionContext, additionalHeaders);
+        byte[] parametersBytes = parametersString.getBytes(StandardCharsets.UTF_8);
+        if (parametersBytes.length != (((short) parametersBytes.length) & 0xFFFF))
+            throw new ConfigurationException(String.format("Compression parameters too long, length %d cannot be above 65535.",
+                                                           parametersBytes.length));
+        out.putShort((short) parametersBytes.length);
+        updateChecksumInt(crc, parametersBytes.length);
+        out.put(parametersBytes);
+        crc.update(parametersBytes, 0, parametersBytes.length);
         out.putInt((int) crc.getValue());
     }
 
@@ -157,16 +149,15 @@ public class CommitLogDescriptor
     {
         CRC32 checkcrc = new CRC32();
         int version = input.readInt();
+        if (version < VERSION_30)
+            throw new IllegalArgumentException("Unsupported pre-3.0 commit log found; cannot read.");
+
         updateChecksumInt(checkcrc, version);
         long id = input.readLong();
         updateChecksumInt(checkcrc, (int) (id & 0xFFFFFFFFL));
         updateChecksumInt(checkcrc, (int) (id >>> 32));
-        int parametersLength = 0;
-        if (version >= VERSION_22)
-        {
-            parametersLength = input.readShort() & 0xFFFF;
-            updateChecksumInt(checkcrc, parametersLength);
-        }
+        int parametersLength = input.readShort() & 0xFFFF;
+        updateChecksumInt(checkcrc, parametersLength);
         // This should always succeed as parametersLength cannot be too long even for a
         // corrupt segment file.
         byte[] parametersBytes = new byte[parametersLength];
@@ -213,14 +204,6 @@ public class CommitLogDescriptor
     {
         switch (version)
         {
-            case VERSION_12:
-                return MessagingService.VERSION_12;
-            case VERSION_20:
-                return MessagingService.VERSION_20;
-            case VERSION_21:
-                return MessagingService.VERSION_21;
-            case VERSION_22:
-                return MessagingService.VERSION_22;
             case VERSION_30:
                 return MessagingService.VERSION_30;
             default:

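Putting the simplified write and read paths together, every commit log segment header now has one fixed layout, reconstructed from the two hunks above (sizes are the Java primitive sizes; the CRC covers every field before it):

    //   int    version            always VERSION_30 (6) or later; lower values are rejected on read
    //   long   id                 segment id, folded into the CRC as two ints
    //   short  parametersLength   unsigned length of the UTF-8 parameters string (at most 65535)
    //   byte[] parametersBytes    compression/encryption parameters
    //   int    crc                CRC32 of all of the above
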
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/commitlog/CommitLogReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogReader.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogReader.java
index e6e2e1a..eb745c7 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogReader.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogReader.java
@@ -122,19 +122,6 @@ public class CommitLogReader
 
         try(RandomAccessReader reader = RandomAccessReader.open(file))
         {
-            if (desc.version < CommitLogDescriptor.VERSION_21)
-            {
-                if (!shouldSkipSegmentId(file, desc, minPosition))
-                {
-                    if (minPosition.segmentId == desc.id)
-                        reader.seek(minPosition.position);
-                    ReadStatusTracker statusTracker = new ReadStatusTracker(mutationLimit, tolerateTruncation);
-                    statusTracker.errorContext = desc.fileName();
-                    readSection(handler, reader, minPosition, (int) reader.length(), statusTracker, desc);
-                }
-                return;
-            }
-
             final long segmentIdFromFilename = desc.id;
             try
             {
@@ -430,42 +417,17 @@ public class CommitLogReader
     {
         public static long calculateClaimedChecksum(FileDataInput input, int commitLogVersion) throws IOException
         {
-            switch (commitLogVersion)
-            {
-                case CommitLogDescriptor.VERSION_12:
-                case CommitLogDescriptor.VERSION_20:
-                    return input.readLong();
-                // Changed format in 2.1
-                default:
-                    return input.readInt() & 0xffffffffL;
-            }
+            return input.readInt() & 0xffffffffL;
         }
 
         public static void updateChecksum(CRC32 checksum, int serializedSize, int commitLogVersion)
         {
-            switch (commitLogVersion)
-            {
-                case CommitLogDescriptor.VERSION_12:
-                    checksum.update(serializedSize);
-                    break;
-                // Changed format in 2.0
-                default:
-                    updateChecksumInt(checksum, serializedSize);
-                    break;
-            }
+            updateChecksumInt(checksum, serializedSize);
         }
 
         public static long calculateClaimedCRC32(FileDataInput input, int commitLogVersion) throws IOException
         {
-            switch (commitLogVersion)
-            {
-                case CommitLogDescriptor.VERSION_12:
-                case CommitLogDescriptor.VERSION_20:
-                    return input.readLong();
-                // Changed format in 2.1
-                default:
-                    return input.readInt() & 0xffffffffL;
-            }
+            return input.readInt() & 0xffffffffL;
         }
     }
 

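All three checksum helpers above now assume the 2.1+ convention: a CRC32 stored on disk as a 4-byte int. A minimal sketch of checking one claimed value under that convention (updateChecksumInt is the same helper the surviving code calls; input and serializedSize are assumed in scope):

    CRC32 checksum = new CRC32();
    updateChecksumInt(checksum, serializedSize);      // fold the record size into the CRC
    long claimed = input.readInt() & 0xffffffffL;     // widen the stored int to an unsigned long
    boolean matches = checksum.getValue() == claimed;
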
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
index a22cda5..78650f1 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
@@ -1242,7 +1242,7 @@ public class CompactionManager implements CompactionManagerMBean
             header = SerializationHeader.make(sstable.metadata, Collections.singleton(sstable));
 
         return SSTableWriter.create(cfs.metadata,
-                                    Descriptor.fromFilename(cfs.getSSTablePath(compactionFileLocation)),
+                                    cfs.newSSTableDescriptor(compactionFileLocation),
                                     expectedBloomFilterSize,
                                     repairedAt,
                                     sstable.getSSTableLevel(),
@@ -1274,7 +1274,7 @@ public class CompactionManager implements CompactionManagerMBean
                 break;
             }
         }
-        return SSTableWriter.create(Descriptor.fromFilename(cfs.getSSTablePath(compactionFileLocation)),
+        return SSTableWriter.create(cfs.newSSTableDescriptor(compactionFileLocation),
                                     (long) expectedBloomFilterSize,
                                     repairedAt,
                                     cfs.metadata,

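The two CompactionManager hunks above are the first of several identical substitutions (Upgrader and the compaction writers below repeat the same change): instead of rendering a file path and re-parsing it, the ColumnFamilyStore now hands out a fresh Descriptor directly. Reduced to one line each (variable names here are illustrative):

    // Before: serialize the location to a filename, then parse it back into a Descriptor.
    Descriptor oldStyle = Descriptor.fromFilename(cfs.getSSTablePath(compactionFileLocation));

    // After: construct the descriptor for that location in a single step.
    Descriptor newStyle = cfs.newSSTableDescriptor(compactionFileLocation);
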
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/compaction/Upgrader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/Upgrader.java b/src/java/org/apache/cassandra/db/compaction/Upgrader.java
index 7a5b719..aedb208 100644
--- a/src/java/org/apache/cassandra/db/compaction/Upgrader.java
+++ b/src/java/org/apache/cassandra/db/compaction/Upgrader.java
@@ -70,7 +70,7 @@ public class Upgrader
     {
         MetadataCollector sstableMetadataCollector = new MetadataCollector(cfs.getComparator());
         sstableMetadataCollector.sstableLevel(sstable.getSSTableLevel());
-        return SSTableWriter.create(Descriptor.fromFilename(cfs.getSSTablePath(directory)),
+        return SSTableWriter.create(cfs.newSSTableDescriptor(directory),
                                     estimatedRows,
                                     repairedAt,
                                     cfs.metadata,

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/compaction/Verifier.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/Verifier.java b/src/java/org/apache/cassandra/db/compaction/Verifier.java
index df659e4..a52dd82 100644
--- a/src/java/org/apache/cassandra/db/compaction/Verifier.java
+++ b/src/java/org/apache/cassandra/db/compaction/Verifier.java
@@ -97,8 +97,7 @@ public class Verifier implements Closeable
         {
             validator = null;
 
-            if (sstable.descriptor.digestComponent != null &&
-                new File(sstable.descriptor.filenameFor(sstable.descriptor.digestComponent)).exists())
+            if (new File(sstable.descriptor.filenameFor(Component.DIGEST)).exists())
             {
                 validator = DataIntegrityMetadata.fileDigestValidator(sstable.descriptor);
                 validator.validate();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/compaction/writers/DefaultCompactionWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/writers/DefaultCompactionWriter.java b/src/java/org/apache/cassandra/db/compaction/writers/DefaultCompactionWriter.java
index f8ecd87..d279321 100644
--- a/src/java/org/apache/cassandra/db/compaction/writers/DefaultCompactionWriter.java
+++ b/src/java/org/apache/cassandra/db/compaction/writers/DefaultCompactionWriter.java
@@ -69,7 +69,7 @@ public class DefaultCompactionWriter extends CompactionAwareWriter
     public void switchCompactionLocation(Directories.DataDirectory directory)
     {
         @SuppressWarnings("resource")
-        SSTableWriter writer = SSTableWriter.create(Descriptor.fromFilename(cfs.getSSTablePath(getDirectories().getLocationForDisk(directory))),
+        SSTableWriter writer = SSTableWriter.create(cfs.newSSTableDescriptor(getDirectories().getLocationForDisk(directory)),
                                                     estimatedTotalKeys,
                                                     minRepairedAt,
                                                     cfs.metadata,

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/compaction/writers/MajorLeveledCompactionWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/writers/MajorLeveledCompactionWriter.java b/src/java/org/apache/cassandra/db/compaction/writers/MajorLeveledCompactionWriter.java
index 0beb505..a3d8c98 100644
--- a/src/java/org/apache/cassandra/db/compaction/writers/MajorLeveledCompactionWriter.java
+++ b/src/java/org/apache/cassandra/db/compaction/writers/MajorLeveledCompactionWriter.java
@@ -105,7 +105,7 @@ public class MajorLeveledCompactionWriter extends CompactionAwareWriter
     {
         this.sstableDirectory = location;
         averageEstimatedKeysPerSSTable = Math.round(((double) averageEstimatedKeysPerSSTable * sstablesWritten + partitionsWritten) / (sstablesWritten + 1));
-        sstableWriter.switchWriter(SSTableWriter.create(Descriptor.fromFilename(cfs.getSSTablePath(getDirectories().getLocationForDisk(sstableDirectory))),
+        sstableWriter.switchWriter(SSTableWriter.create(cfs.newSSTableDescriptor(getDirectories().getLocationForDisk(sstableDirectory)),
                 keysPerSSTable,
                 minRepairedAt,
                 cfs.metadata,

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/compaction/writers/MaxSSTableSizeWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/writers/MaxSSTableSizeWriter.java b/src/java/org/apache/cassandra/db/compaction/writers/MaxSSTableSizeWriter.java
index 864185e..7acb870 100644
--- a/src/java/org/apache/cassandra/db/compaction/writers/MaxSSTableSizeWriter.java
+++ b/src/java/org/apache/cassandra/db/compaction/writers/MaxSSTableSizeWriter.java
@@ -108,7 +108,7 @@ public class MaxSSTableSizeWriter extends CompactionAwareWriter
     {
         sstableDirectory = location;
         @SuppressWarnings("resource")
-        SSTableWriter writer = SSTableWriter.create(Descriptor.fromFilename(cfs.getSSTablePath(getDirectories().getLocationForDisk(sstableDirectory))),
+        SSTableWriter writer = SSTableWriter.create(cfs.newSSTableDescriptor(getDirectories().getLocationForDisk(sstableDirectory)),
                                                     estimatedTotalKeys / estimatedSSTables,
                                                     minRepairedAt,
                                                     cfs.metadata,

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/compaction/writers/SplittingSizeTieredCompactionWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/writers/SplittingSizeTieredCompactionWriter.java b/src/java/org/apache/cassandra/db/compaction/writers/SplittingSizeTieredCompactionWriter.java
index 46cb891..a01672e 100644
--- a/src/java/org/apache/cassandra/db/compaction/writers/SplittingSizeTieredCompactionWriter.java
+++ b/src/java/org/apache/cassandra/db/compaction/writers/SplittingSizeTieredCompactionWriter.java
@@ -104,7 +104,7 @@ public class SplittingSizeTieredCompactionWriter extends CompactionAwareWriter
         this.location = location;
         long currentPartitionsToWrite = Math.round(ratios[currentRatioIndex] * estimatedTotalKeys);
         @SuppressWarnings("resource")
-        SSTableWriter writer = SSTableWriter.create(Descriptor.fromFilename(cfs.getSSTablePath(getDirectories().getLocationForDisk(location))),
+        SSTableWriter writer = SSTableWriter.create(cfs.newSSTableDescriptor(getDirectories().getLocationForDisk(location)),
                                                     currentPartitionsToWrite,
                                                     minRepairedAt,
                                                     cfs.metadata,


http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/RangeSliceVerbHandler.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/RangeSliceVerbHandler.java b/src/java/org/apache/cassandra/db/RangeSliceVerbHandler.java
deleted file mode 100644
index 55826f5..0000000
--- a/src/java/org/apache/cassandra/db/RangeSliceVerbHandler.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.db;
-
-import org.apache.cassandra.io.IVersionedSerializer;
-
-public class RangeSliceVerbHandler extends ReadCommandVerbHandler
-{
-    @Override
-    protected IVersionedSerializer<ReadResponse> serializer()
-    {
-        return ReadResponse.rangeSliceSerializer;
-    }
-}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/ReadCommand.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/ReadCommand.java b/src/java/org/apache/cassandra/db/ReadCommand.java
index d8051fe..0bda184 100644
--- a/src/java/org/apache/cassandra/db/ReadCommand.java
+++ b/src/java/org/apache/cassandra/db/ReadCommand.java
@@ -37,7 +37,6 @@ import org.apache.cassandra.db.transform.Transformation;
 import org.apache.cassandra.dht.AbstractBounds;
 import org.apache.cassandra.index.Index;
 import org.apache.cassandra.index.IndexNotAvailableException;
-import org.apache.cassandra.io.ForwardingVersionedSerializer;
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
@@ -64,43 +63,6 @@ public abstract class ReadCommand extends MonitorableImpl implements ReadQuery
     protected static final Logger logger = LoggerFactory.getLogger(ReadCommand.class);
     public static final IVersionedSerializer<ReadCommand> serializer = new Serializer();
 
-    // For READ verb: will either dispatch on 'serializer' for 3.0 or 'legacyReadCommandSerializer' for earlier versions.
-    // Can be removed (and replaced by 'serializer') once we drop pre-3.0 backward compatibility.
-    public static final IVersionedSerializer<ReadCommand> readSerializer = new ForwardingVersionedSerializer<ReadCommand>()
-    {
-        protected IVersionedSerializer<ReadCommand> delegate(int version)
-        {
-            return version < MessagingService.VERSION_30
-                    ? legacyReadCommandSerializer : serializer;
-        }
-    };
-
-    // For RANGE_SLICE verb: will either dispatch on 'serializer' for 3.0 or 'legacyRangeSliceCommandSerializer' for earlier versions.
-    // Can be removed (and replaced by 'serializer') once we drop pre-3.0 backward compatibility.
-    public static final IVersionedSerializer<ReadCommand> rangeSliceSerializer = new ForwardingVersionedSerializer<ReadCommand>()
-    {
-        protected IVersionedSerializer<ReadCommand> delegate(int version)
-        {
-            return version < MessagingService.VERSION_30
-                    ? legacyRangeSliceCommandSerializer : serializer;
-        }
-    };
-
-    // For PAGED_RANGE verb: will either dispatch on 'serializer' for 3.0 or 'legacyPagedRangeCommandSerializer' for earlier versions.
-    // Can be removed (and replaced by 'serializer') once we drop pre-3.0 backward compatibility.
-    public static final IVersionedSerializer<ReadCommand> pagedRangeSerializer = new ForwardingVersionedSerializer<ReadCommand>()
-    {
-        protected IVersionedSerializer<ReadCommand> delegate(int version)
-        {
-            return version < MessagingService.VERSION_30
-                    ? legacyPagedRangeCommandSerializer : serializer;
-        }
-    };
-
-    public static final IVersionedSerializer<ReadCommand> legacyRangeSliceCommandSerializer = new LegacyRangeSliceCommandSerializer();
-    public static final IVersionedSerializer<ReadCommand> legacyPagedRangeCommandSerializer = new LegacyPagedRangeCommandSerializer();
-    public static final IVersionedSerializer<ReadCommand> legacyReadCommandSerializer = new LegacyReadCommandSerializer();
-
     private final Kind kind;
     private final CFMetaData metadata;
     private final int nowInSec;
@@ -580,7 +542,7 @@ public abstract class ReadCommand extends MonitorableImpl implements ReadQuery
     /**
      * Creates a message for this command.
      */
-    public abstract MessageOut<ReadCommand> createMessage(int version);
+    public abstract MessageOut<ReadCommand> createMessage();
 
     protected abstract void appendCQLWhereClause(StringBuilder sb);
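
The createMessage() change above ripples to every call site: with a single wire format there is no per-version serializer to choose while building the message, so the messaging-version argument disappears. A hypothetical call site, before and after (illustrative only, not part of this patch):

    // Before: the peer's messaging version selected between the legacy and 3.0 serializers.
    MessageOut<ReadCommand> request = command.createMessage(MessagingService.current_version);

    // After: the single Serializer covers every supported version.
    MessageOut<ReadCommand> request = command.createMessage();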
 
@@ -666,8 +628,6 @@ public abstract class ReadCommand extends MonitorableImpl implements ReadQuery
 
         public void serialize(ReadCommand command, DataOutputPlus out, int version) throws IOException
         {
-            assert version >= MessagingService.VERSION_30;
-
             out.writeByte(command.kind.ordinal());
             out.writeByte(digestFlag(command.isDigestQuery()) | thriftFlag(command.isForThrift()) | indexFlag(command.index.isPresent()));
             if (command.isDigestQuery())
@@ -685,8 +645,6 @@ public abstract class ReadCommand extends MonitorableImpl implements ReadQuery
 
         public ReadCommand deserialize(DataInputPlus in, int version) throws IOException
         {
-            assert version >= MessagingService.VERSION_30;
-
             Kind kind = Kind.values()[in.readByte()];
             int flags = in.readByte();
             boolean isDigest = isDigest(flags);
@@ -699,8 +657,8 @@ public abstract class ReadCommand extends MonitorableImpl implements ReadQuery
             RowFilter rowFilter = RowFilter.serializer.deserialize(in, version, metadata);
             DataLimits limits = DataLimits.serializer.deserialize(in, version, metadata.comparator);
             Optional<IndexMetadata> index = hasIndex
-                                            ? deserializeIndexMetadata(in, version, metadata)
-                                            : Optional.empty();
+                                          ? deserializeIndexMetadata(in, version, metadata)
+                                          : Optional.empty();
 
             return kind.selectionDeserializer.deserialize(in, version, isDigest, digestVersion, isForThrift, metadata, nowInSec, columnFilter, rowFilter, limits, index);
         }
@@ -724,8 +682,6 @@ public abstract class ReadCommand extends MonitorableImpl implements ReadQuery
 
         public long serializedSize(ReadCommand command, int version)
         {
-            assert version >= MessagingService.VERSION_30;
-
             return 2 // kind + flags
                  + (command.isDigestQuery() ? TypeSizes.sizeofUnsignedVInt(command.digestVersion()) : 0)
                  + CFMetaData.serializer.serializedSize(command.metadata(), version)
@@ -737,1015 +693,4 @@ public abstract class ReadCommand extends MonitorableImpl implements ReadQuery
                  + command.indexSerializedSize(version);
         }
     }
-
-    private enum LegacyType
-    {
-        GET_BY_NAMES((byte)1),
-        GET_SLICES((byte)2);
-
-        public final byte serializedValue;
-
-        LegacyType(byte b)
-        {
-            this.serializedValue = b;
-        }
-
-        public static LegacyType fromPartitionFilterKind(ClusteringIndexFilter.Kind kind)
-        {
-            return kind == ClusteringIndexFilter.Kind.SLICE
-                   ? GET_SLICES
-                   : GET_BY_NAMES;
-        }
-
-        public static LegacyType fromSerializedValue(byte b)
-        {
-            return b == 1 ? GET_BY_NAMES : GET_SLICES;
-        }
-    }
-
-    /**
-     * Serializer for pre-3.0 RangeSliceCommands.
-     */
-    private static class LegacyRangeSliceCommandSerializer implements IVersionedSerializer<ReadCommand>
-    {
-        public void serialize(ReadCommand command, DataOutputPlus out, int version) throws IOException
-        {
-            assert version < MessagingService.VERSION_30;
-
-            PartitionRangeReadCommand rangeCommand = (PartitionRangeReadCommand) command;
-            assert !rangeCommand.dataRange().isPaging();
-
-            // convert pre-3.0 incompatible names filters to slice filters
-            rangeCommand = maybeConvertNamesToSlice(rangeCommand);
-
-            CFMetaData metadata = rangeCommand.metadata();
-
-            out.writeUTF(metadata.ksName);
-            out.writeUTF(metadata.cfName);
-            out.writeLong(rangeCommand.nowInSec() * 1000L);  // convert from seconds to millis
-
-            // begin DiskAtomFilterSerializer.serialize()
-            if (rangeCommand.isNamesQuery())
-            {
-                out.writeByte(1);  // 0 for slices, 1 for names
-                ClusteringIndexNamesFilter filter = (ClusteringIndexNamesFilter) rangeCommand.dataRange().clusteringIndexFilter;
-                LegacyReadCommandSerializer.serializeNamesFilter(rangeCommand, filter, out);
-            }
-            else
-            {
-                out.writeByte(0);  // 0 for slices, 1 for names
-
-                // slice filter serialization
-                ClusteringIndexSliceFilter filter = (ClusteringIndexSliceFilter) rangeCommand.dataRange().clusteringIndexFilter;
-
-                boolean makeStaticSlice = !rangeCommand.columnFilter().fetchedColumns().statics.isEmpty() && !filter.requestedSlices().selects(Clustering.STATIC_CLUSTERING);
-                LegacyReadCommandSerializer.serializeSlices(out, filter.requestedSlices(), filter.isReversed(), makeStaticSlice, metadata);
-
-                out.writeBoolean(filter.isReversed());
-
-                // limit
-                DataLimits limits = rangeCommand.limits();
-                if (limits.isDistinct())
-                    out.writeInt(1);
-                else
-                    out.writeInt(LegacyReadCommandSerializer.updateLimitForQuery(rangeCommand.limits().count(), filter.requestedSlices()));
-
-                int compositesToGroup;
-                boolean selectsStatics = !rangeCommand.columnFilter().fetchedColumns().statics.isEmpty() && filter.requestedSlices().selects(Clustering.STATIC_CLUSTERING);
-                if (limits.kind() == DataLimits.Kind.THRIFT_LIMIT)
-                    compositesToGroup = -1;
-                else if (limits.isDistinct() && !selectsStatics)
-                    compositesToGroup = -2;  // for DISTINCT queries (CASSANDRA-8490)
-                else
-                    compositesToGroup = metadata.isDense() ? -1 : metadata.clusteringColumns().size();
-
-                out.writeInt(compositesToGroup);
-            }
-
-            serializeRowFilter(out, rangeCommand.rowFilter());
-            AbstractBounds.rowPositionSerializer.serialize(rangeCommand.dataRange().keyRange(), out, version);
-
-            // maxResults
-            out.writeInt(rangeCommand.limits().count());
-
-            // countCQL3Rows
-            if (rangeCommand.isForThrift() || rangeCommand.limits().perPartitionCount() == 1)  // if for Thrift or DISTINCT
-                out.writeBoolean(false);
-            else
-                out.writeBoolean(true);
-
-            // isPaging
-            out.writeBoolean(false);
-        }
-
-        public ReadCommand deserialize(DataInputPlus in, int version) throws IOException
-        {
-            assert version < MessagingService.VERSION_30;
-
-            String keyspace = in.readUTF();
-            String columnFamily = in.readUTF();
-
-            CFMetaData metadata = Schema.instance.getCFMetaData(keyspace, columnFamily);
-            if (metadata == null)
-            {
-                String message = String.format("Got legacy range command for nonexistent table %s.%s.", keyspace, columnFamily);
-                throw new UnknownColumnFamilyException(message, null);
-            }
-
-            int nowInSec = (int) (in.readLong() / 1000);  // convert from millis to seconds
-
-            ClusteringIndexFilter filter;
-            ColumnFilter selection;
-            int compositesToGroup = 0;
-            int perPartitionLimit = -1;
-            byte readType = in.readByte();  // 0 for slices, 1 for names
-            if (readType == 1)
-            {
-                Pair<ColumnFilter, ClusteringIndexNamesFilter> selectionAndFilter = LegacyReadCommandSerializer.deserializeNamesSelectionAndFilter(in, metadata);
-                selection = selectionAndFilter.left;
-                filter = selectionAndFilter.right;
-            }
-            else
-            {
-                Pair<ClusteringIndexSliceFilter, Boolean> p = LegacyReadCommandSerializer.deserializeSlicePartitionFilter(in, metadata);
-                filter = p.left;
-                perPartitionLimit = in.readInt();
-                compositesToGroup = in.readInt();
-                selection = getColumnSelectionForSlice(p.right, compositesToGroup, metadata);
-            }
-
-            RowFilter rowFilter = deserializeRowFilter(in, metadata);
-
-            AbstractBounds<PartitionPosition> keyRange = AbstractBounds.rowPositionSerializer.deserialize(in, metadata.partitioner, version);
-            int maxResults = in.readInt();
-
-            boolean countCQL3Rows = in.readBoolean();  // countCQL3Rows (used below to detect DISTINCT queries)
-            in.readBoolean();  // isPaging (not needed)
-
-            boolean selectsStatics = (!selection.fetchedColumns().statics.isEmpty() || filter.selects(Clustering.STATIC_CLUSTERING));
-            // We have 2 types of DISTINCT queries: ones on only the partition key, and ones on the partition key and static columns. For the former,
-            // we can easily detect the case because compositesToGroup is -2 and that's the only case in which it can be. The latter one is slightly less
-            // direct, but we know that on 2.1/2.2, DISTINCT queries are the only CQL queries that have countCQL3Rows set to false, so we use
-            // that fact.
-            boolean isDistinct = compositesToGroup == -2 || (compositesToGroup != -1 && !countCQL3Rows);
-            DataLimits limits;
-            if (isDistinct)
-                limits = DataLimits.distinctLimits(maxResults);
-            else if (compositesToGroup == -1)
-                limits = DataLimits.thriftLimits(maxResults, perPartitionLimit);
-            else
-                limits = DataLimits.cqlLimits(maxResults);
-
-            return new PartitionRangeReadCommand(false, 0, true, metadata, nowInSec, selection, rowFilter, limits, new DataRange(keyRange, filter), Optional.empty());
-        }
-
-        static void serializeRowFilter(DataOutputPlus out, RowFilter rowFilter) throws IOException
-        {
-            ArrayList<RowFilter.Expression> indexExpressions = Lists.newArrayList(rowFilter.iterator());
-            out.writeInt(indexExpressions.size());
-            for (RowFilter.Expression expression : indexExpressions)
-            {
-                ByteBufferUtil.writeWithShortLength(expression.column().name.bytes, out);
-                expression.operator().writeTo(out);
-                ByteBufferUtil.writeWithShortLength(expression.getIndexValue(), out);
-            }
-        }
-
-        static RowFilter deserializeRowFilter(DataInputPlus in, CFMetaData metadata) throws IOException
-        {
-            int numRowFilters = in.readInt();
-            if (numRowFilters == 0)
-                return RowFilter.NONE;
-
-            RowFilter rowFilter = RowFilter.create(numRowFilters);
-            for (int i = 0; i < numRowFilters; i++)
-            {
-                ByteBuffer columnName = ByteBufferUtil.readWithShortLength(in);
-                ColumnDefinition column = metadata.getColumnDefinition(columnName);
-                Operator op = Operator.readFrom(in);
-                ByteBuffer indexValue = ByteBufferUtil.readWithShortLength(in);
-                rowFilter.add(column, op, indexValue);
-            }
-            return rowFilter;
-        }
-
-        static long serializedRowFilterSize(RowFilter rowFilter)
-        {
-            long size = TypeSizes.sizeof(0);  // rowFilterCount
-            for (RowFilter.Expression expression : rowFilter)
-            {
-                size += ByteBufferUtil.serializedSizeWithShortLength(expression.column().name.bytes);
-                size += TypeSizes.sizeof(0);  // operator int value
-                size += ByteBufferUtil.serializedSizeWithShortLength(expression.getIndexValue());
-            }
-            return size;
-        }
-
-        public long serializedSize(ReadCommand command, int version)
-        {
-            assert version < MessagingService.VERSION_30;
-            assert command.kind == Kind.PARTITION_RANGE;
-
-            PartitionRangeReadCommand rangeCommand = (PartitionRangeReadCommand) command;
-            rangeCommand = maybeConvertNamesToSlice(rangeCommand);
-            CFMetaData metadata = rangeCommand.metadata();
-
-            long size = TypeSizes.sizeof(metadata.ksName);
-            size += TypeSizes.sizeof(metadata.cfName);
-            size += TypeSizes.sizeof((long) rangeCommand.nowInSec());
-
-            size += 1;  // single byte flag: 0 for slices, 1 for names
-            if (rangeCommand.isNamesQuery())
-            {
-                PartitionColumns columns = rangeCommand.columnFilter().fetchedColumns();
-                ClusteringIndexNamesFilter filter = (ClusteringIndexNamesFilter) rangeCommand.dataRange().clusteringIndexFilter;
-                size += LegacyReadCommandSerializer.serializedNamesFilterSize(filter, metadata, columns);
-            }
-            else
-            {
-                ClusteringIndexSliceFilter filter = (ClusteringIndexSliceFilter) rangeCommand.dataRange().clusteringIndexFilter;
-                boolean makeStaticSlice = !rangeCommand.columnFilter().fetchedColumns().statics.isEmpty() && !filter.requestedSlices().selects(Clustering.STATIC_CLUSTERING);
-                size += LegacyReadCommandSerializer.serializedSlicesSize(filter.requestedSlices(), makeStaticSlice, metadata);
-                size += TypeSizes.sizeof(filter.isReversed());
-                size += TypeSizes.sizeof(rangeCommand.limits().perPartitionCount());
-                size += TypeSizes.sizeof(0); // compositesToGroup
-            }
-
-            if (rangeCommand.rowFilter().equals(RowFilter.NONE))
-            {
-                size += TypeSizes.sizeof(0);
-            }
-            else
-            {
-                ArrayList<RowFilter.Expression> indexExpressions = Lists.newArrayList(rangeCommand.rowFilter().iterator());
-                size += TypeSizes.sizeof(indexExpressions.size());
-                for (RowFilter.Expression expression : indexExpressions)
-                {
-                    size += ByteBufferUtil.serializedSizeWithShortLength(expression.column().name.bytes);
-                    size += TypeSizes.sizeof(expression.operator().ordinal());
-                    size += ByteBufferUtil.serializedSizeWithShortLength(expression.getIndexValue());
-                }
-            }
-
-            size += AbstractBounds.rowPositionSerializer.serializedSize(rangeCommand.dataRange().keyRange(), version);
-            size += TypeSizes.sizeof(rangeCommand.limits().count());
-            size += TypeSizes.sizeof(!rangeCommand.isForThrift());
-            return size + TypeSizes.sizeof(rangeCommand.dataRange().isPaging());
-        }
-
-        static PartitionRangeReadCommand maybeConvertNamesToSlice(PartitionRangeReadCommand command)
-        {
-            if (!command.dataRange().isNamesQuery())
-                return command;
-
-            CFMetaData metadata = command.metadata();
-            if (!LegacyReadCommandSerializer.shouldConvertNamesToSlice(metadata, command.columnFilter().fetchedColumns()))
-                return command;
-
-            ClusteringIndexNamesFilter filter = (ClusteringIndexNamesFilter) command.dataRange().clusteringIndexFilter;
-            ClusteringIndexSliceFilter sliceFilter = LegacyReadCommandSerializer.convertNamesFilterToSliceFilter(filter, metadata);
-            DataRange newRange = new DataRange(command.dataRange().keyRange(), sliceFilter);
-            return new PartitionRangeReadCommand(
-                    command.isDigestQuery(), command.digestVersion(), command.isForThrift(), metadata, command.nowInSec(),
-                    command.columnFilter(), command.rowFilter(), command.limits(), newRange, Optional.empty());
-        }
-
-        static ColumnFilter getColumnSelectionForSlice(boolean selectsStatics, int compositesToGroup, CFMetaData metadata)
-        {
-            // A value of -2 indicates this is a DISTINCT query that doesn't select static columns, only partition keys.
-            // In that case, we'll basically be querying the first row of the partition, but we must make sure we include
-            // all columns so that we get at least one cell if there is a live row, as pre-3.0 nodes would otherwise be confused.
-            if (compositesToGroup == -2)
-                return ColumnFilter.all(metadata);
-
-            // if a slice query from a pre-3.0 node doesn't cover statics, we shouldn't select them at all
-            PartitionColumns columns = selectsStatics
-                                     ? metadata.partitionColumns()
-                                     : metadata.partitionColumns().withoutStatics();
-            return ColumnFilter.selectionBuilder().addAll(columns).build();
-        }
-    }
-
-    /**
-     * Serializer for pre-3.0 PagedRangeCommands.
-     */
-    private static class LegacyPagedRangeCommandSerializer implements IVersionedSerializer<ReadCommand>
-    {
-        public void serialize(ReadCommand command, DataOutputPlus out, int version) throws IOException
-        {
-            assert version < MessagingService.VERSION_30;
-
-            PartitionRangeReadCommand rangeCommand = (PartitionRangeReadCommand) command;
-            assert rangeCommand.dataRange().isPaging();
-
-            CFMetaData metadata = rangeCommand.metadata();
-
-            out.writeUTF(metadata.ksName);
-            out.writeUTF(metadata.cfName);
-            out.writeLong(rangeCommand.nowInSec() * 1000L);  // convert from seconds to millis
-
-            AbstractBounds.rowPositionSerializer.serialize(rangeCommand.dataRange().keyRange(), out, version);
-
-            // pre-3.0 nodes don't accept names filters for paged range commands
-            ClusteringIndexSliceFilter filter;
-            if (rangeCommand.dataRange().clusteringIndexFilter.kind() == ClusteringIndexFilter.Kind.NAMES)
-                filter = LegacyReadCommandSerializer.convertNamesFilterToSliceFilter((ClusteringIndexNamesFilter) rangeCommand.dataRange().clusteringIndexFilter, metadata);
-            else
-                filter = (ClusteringIndexSliceFilter) rangeCommand.dataRange().clusteringIndexFilter;
-
-            // slice filter
-            boolean makeStaticSlice = !rangeCommand.columnFilter().fetchedColumns().statics.isEmpty() && !filter.requestedSlices().selects(Clustering.STATIC_CLUSTERING);
-            LegacyReadCommandSerializer.serializeSlices(out, filter.requestedSlices(), filter.isReversed(), makeStaticSlice, metadata);
-            out.writeBoolean(filter.isReversed());
-
-            // slice filter's count
-            DataLimits.Kind kind = rangeCommand.limits().kind();
-            boolean isDistinct = (kind == DataLimits.Kind.CQL_LIMIT || kind == DataLimits.Kind.CQL_PAGING_LIMIT) && rangeCommand.limits().perPartitionCount() == 1;
-            if (isDistinct)
-                out.writeInt(1);
-            else
-                out.writeInt(LegacyReadCommandSerializer.updateLimitForQuery(rangeCommand.limits().perPartitionCount(), filter.requestedSlices()));
-
-            // compositesToGroup
-            boolean selectsStatics = !rangeCommand.columnFilter().fetchedColumns().statics.isEmpty() || filter.requestedSlices().selects(Clustering.STATIC_CLUSTERING);
-            int compositesToGroup;
-            if (kind == DataLimits.Kind.THRIFT_LIMIT)
-                compositesToGroup = -1;
-            else if (isDistinct && !selectsStatics)
-                compositesToGroup = -2;  // for DISTINCT queries (CASSANDRA-8490)
-            else
-                compositesToGroup = metadata.isDense() ? -1 : metadata.clusteringColumns().size();
-
-            out.writeInt(compositesToGroup);
-
-            // command-level "start" and "stop" composites.  The start is the last-returned cell name if there is one,
-            // otherwise it's the same as the slice filter's start.  The stop appears to always be the same as the
-            // slice filter's stop.
-            DataRange.Paging pagingRange = (DataRange.Paging) rangeCommand.dataRange();
-            Clustering lastReturned = pagingRange.getLastReturned();
-            ClusteringBound newStart = ClusteringBound.inclusiveStartOf(lastReturned);
-            Slice lastSlice = filter.requestedSlices().get(filter.requestedSlices().size() - 1);
-            ByteBufferUtil.writeWithShortLength(LegacyLayout.encodeBound(metadata, newStart, true), out);
-            ByteBufferUtil.writeWithShortLength(LegacyLayout.encodeClustering(metadata, lastSlice.end().clustering()), out);
-
-            LegacyRangeSliceCommandSerializer.serializeRowFilter(out, rangeCommand.rowFilter());
-
-            // command-level limit
-            // Pre-3.0 we would always request one more row than we actually needed and the command-level "start" would
-            // be the last-returned cell name, so the response would always include it.
-            int maxResults = rangeCommand.limits().count() + 1;
-            out.writeInt(maxResults);
-
-            // countCQL3Rows
-            if (rangeCommand.isForThrift() || rangeCommand.limits().perPartitionCount() == 1)  // for Thrift or DISTINCT
-                out.writeBoolean(false);
-            else
-                out.writeBoolean(true);
-        }
-
-        public ReadCommand deserialize(DataInputPlus in, int version) throws IOException
-        {
-            assert version < MessagingService.VERSION_30;
-
-            String keyspace = in.readUTF();
-            String columnFamily = in.readUTF();
-
-            CFMetaData metadata = Schema.instance.getCFMetaData(keyspace, columnFamily);
-            if (metadata == null)
-            {
-                String message = String.format("Got legacy paged range command for nonexistent table %s.%s.", keyspace, columnFamily);
-                throw new UnknownColumnFamilyException(message, null);
-            }
-
-            int nowInSec = (int) (in.readLong() / 1000);  // convert from millis to seconds
-            AbstractBounds<PartitionPosition> keyRange = AbstractBounds.rowPositionSerializer.deserialize(in, metadata.partitioner, version);
-
-            Pair<ClusteringIndexSliceFilter, Boolean> p = LegacyReadCommandSerializer.deserializeSlicePartitionFilter(in, metadata);
-            ClusteringIndexSliceFilter filter = p.left;
-            boolean selectsStatics = p.right;
-
-            int perPartitionLimit = in.readInt();
-            int compositesToGroup = in.readInt();
-
-            // command-level Composite "start" and "stop"
-            LegacyLayout.LegacyBound startBound = LegacyLayout.decodeBound(metadata, ByteBufferUtil.readWithShortLength(in), true);
-
-            ByteBufferUtil.readWithShortLength(in);  // the composite "stop", which isn't actually needed
-
-            ColumnFilter selection = LegacyRangeSliceCommandSerializer.getColumnSelectionForSlice(selectsStatics, compositesToGroup, metadata);
-
-            RowFilter rowFilter = LegacyRangeSliceCommandSerializer.deserializeRowFilter(in, metadata);
-            int maxResults = in.readInt();
-            boolean countCQL3Rows = in.readBoolean();
-
-            // We have 2 types of DISTINCT queries: ones on only the partition key, and ones on the partition key and static columns. For the former,
-            // we can easily detect the case because compositesToGroup is -2 and that's the only case in which it can be. The latter one is slightly less
-            // direct, but we know that on 2.1/2.2, DISTINCT queries are the only CQL queries that have countCQL3Rows set to false, so we use
-            // that fact.
-            boolean isDistinct = compositesToGroup == -2 || (compositesToGroup != -1 && !countCQL3Rows);
-            DataLimits limits;
-            if (isDistinct)
-                limits = DataLimits.distinctLimits(maxResults);
-            else
-                limits = DataLimits.cqlLimits(maxResults);
-
-            limits = limits.forPaging(maxResults);
-
-            // The pagedRangeCommand is used in pre-3.0 for both the first page and the following ones. On the first page, the startBound will be
-            // the start of the overall slice and will not be a proper Clustering. So detect that case and just return a non-paging DataRange, which
-            // is what 3.0 does.
-            DataRange dataRange = new DataRange(keyRange, filter);
-            Slices slices = filter.requestedSlices();
-            if (!isDistinct && startBound != LegacyLayout.LegacyBound.BOTTOM && !startBound.bound.equals(slices.get(0).start()))
-            {
-                // pre-3.0 nodes normally expect pages to include the last cell from the previous page, but they handle
-                // its absence without any problems, so we can safely always set "inclusive" to false in the data range
-                dataRange = dataRange.forPaging(keyRange, metadata.comparator, startBound.getAsClustering(metadata), false);
-            }
-            return new PartitionRangeReadCommand(false, 0, true, metadata, nowInSec, selection, rowFilter, limits, dataRange, Optional.empty());
-        }
-
-        public long serializedSize(ReadCommand command, int version)
-        {
-            assert version < MessagingService.VERSION_30;
-            assert command.kind == Kind.PARTITION_RANGE;
-
-            PartitionRangeReadCommand rangeCommand = (PartitionRangeReadCommand) command;
-            CFMetaData metadata = rangeCommand.metadata();
-            assert rangeCommand.dataRange().isPaging();
-
-            long size = TypeSizes.sizeof(metadata.ksName);
-            size += TypeSizes.sizeof(metadata.cfName);
-            size += TypeSizes.sizeof((long) rangeCommand.nowInSec());
-
-            size += AbstractBounds.rowPositionSerializer.serializedSize(rangeCommand.dataRange().keyRange(), version);
-
-            // pre-3.0 nodes only accept slice filters for paged range commands
-            ClusteringIndexSliceFilter filter;
-            if (rangeCommand.dataRange().clusteringIndexFilter.kind() == ClusteringIndexFilter.Kind.NAMES)
-                filter = LegacyReadCommandSerializer.convertNamesFilterToSliceFilter((ClusteringIndexNamesFilter) rangeCommand.dataRange().clusteringIndexFilter, metadata);
-            else
-                filter = (ClusteringIndexSliceFilter) rangeCommand.dataRange().clusteringIndexFilter;
-
-            // slice filter
-            boolean makeStaticSlice = !rangeCommand.columnFilter().fetchedColumns().statics.isEmpty() && !filter.requestedSlices().selects(Clustering.STATIC_CLUSTERING);
-            size += LegacyReadCommandSerializer.serializedSlicesSize(filter.requestedSlices(), makeStaticSlice, metadata);
-            size += TypeSizes.sizeof(filter.isReversed());
-
-            // slice filter's count
-            size += TypeSizes.sizeof(rangeCommand.limits().perPartitionCount());
-
-            // compositesToGroup
-            size += TypeSizes.sizeof(0);
-
-            // command-level Composite "start" and "stop"
-            DataRange.Paging pagingRange = (DataRange.Paging) rangeCommand.dataRange();
-            Clustering lastReturned = pagingRange.getLastReturned();
-            Slice lastSlice = filter.requestedSlices().get(filter.requestedSlices().size() - 1);
-            size += ByteBufferUtil.serializedSizeWithShortLength(LegacyLayout.encodeClustering(metadata, lastReturned));
-            size += ByteBufferUtil.serializedSizeWithShortLength(LegacyLayout.encodeClustering(metadata, lastSlice.end().clustering()));
-
-            size += LegacyRangeSliceCommandSerializer.serializedRowFilterSize(rangeCommand.rowFilter());
-
-            // command-level limit
-            size += TypeSizes.sizeof(rangeCommand.limits().count());
-
-            // countCQL3Rows
-            return size + TypeSizes.sizeof(true);
-        }
-    }
-
-    /**
-     * Serializer for pre-3.0 ReadCommands.
-     */
-    static class LegacyReadCommandSerializer implements IVersionedSerializer<ReadCommand>
-    {
-        public void serialize(ReadCommand command, DataOutputPlus out, int version) throws IOException
-        {
-            assert version < MessagingService.VERSION_30;
-            assert command.kind == Kind.SINGLE_PARTITION;
-
-            SinglePartitionReadCommand singleReadCommand = (SinglePartitionReadCommand) command;
-            singleReadCommand = maybeConvertNamesToSlice(singleReadCommand);
-
-            CFMetaData metadata = singleReadCommand.metadata();
-
-            out.writeByte(LegacyType.fromPartitionFilterKind(singleReadCommand.clusteringIndexFilter().kind()).serializedValue);
-
-            out.writeBoolean(singleReadCommand.isDigestQuery());
-            out.writeUTF(metadata.ksName);
-            ByteBufferUtil.writeWithShortLength(singleReadCommand.partitionKey().getKey(), out);
-            out.writeUTF(metadata.cfName);
-            out.writeLong(singleReadCommand.nowInSec() * 1000L);  // convert from seconds to millis
-
-            if (singleReadCommand.clusteringIndexFilter().kind() == ClusteringIndexFilter.Kind.SLICE)
-                serializeSliceCommand(singleReadCommand, out);
-            else
-                serializeNamesCommand(singleReadCommand, out);
-        }
-
-        public ReadCommand deserialize(DataInputPlus in, int version) throws IOException
-        {
-            assert version < MessagingService.VERSION_30;
-            LegacyType msgType = LegacyType.fromSerializedValue(in.readByte());
-
-            boolean isDigest = in.readBoolean();
-            String keyspaceName = in.readUTF();
-            ByteBuffer key = ByteBufferUtil.readWithShortLength(in);
-            String cfName = in.readUTF();
-            long nowInMillis = in.readLong();
-            int nowInSeconds = (int) (nowInMillis / 1000);  // convert from millis to seconds
-            CFMetaData metadata = Schema.instance.getCFMetaData(keyspaceName, cfName);
-            DecoratedKey dk = metadata.partitioner.decorateKey(key);
-
-            switch (msgType)
-            {
-                case GET_BY_NAMES:
-                    return deserializeNamesCommand(in, isDigest, metadata, dk, nowInSeconds, version);
-                case GET_SLICES:
-                    return deserializeSliceCommand(in, isDigest, metadata, dk, nowInSeconds, version);
-                default:
-                    throw new AssertionError();
-            }
-        }
-
-        public long serializedSize(ReadCommand command, int version)
-        {
-            assert version < MessagingService.VERSION_30;
-            assert command.kind == Kind.SINGLE_PARTITION;
-            SinglePartitionReadCommand singleReadCommand = (SinglePartitionReadCommand) command;
-            singleReadCommand = maybeConvertNamesToSlice(singleReadCommand);
-
-            int keySize = singleReadCommand.partitionKey().getKey().remaining();
-
-            CFMetaData metadata = singleReadCommand.metadata();
-
-            long size = 1;  // message type (single byte)
-            size += TypeSizes.sizeof(command.isDigestQuery());
-            size += TypeSizes.sizeof(metadata.ksName);
-            size += TypeSizes.sizeof((short) keySize) + keySize;
-            size += TypeSizes.sizeof((long) command.nowInSec());
-
-            if (singleReadCommand.clusteringIndexFilter().kind() == ClusteringIndexFilter.Kind.SLICE)
-                return size + serializedSliceCommandSize(singleReadCommand);
-            else
-                return size + serializedNamesCommandSize(singleReadCommand);
-        }
-
-        private void serializeNamesCommand(SinglePartitionReadCommand command, DataOutputPlus out) throws IOException
-        {
-            serializeNamesFilter(command, (ClusteringIndexNamesFilter)command.clusteringIndexFilter(), out);
-        }
-
-        private static void serializeNamesFilter(ReadCommand command, ClusteringIndexNamesFilter filter, DataOutputPlus out) throws IOException
-        {
-            PartitionColumns columns = command.columnFilter().fetchedColumns();
-            CFMetaData metadata = command.metadata();
-            SortedSet<Clustering> requestedRows = filter.requestedRows();
-
-            if (requestedRows.isEmpty())
-            {
-                // only static columns are requested
-                out.writeInt(columns.size());
-                for (ColumnDefinition column : columns)
-                    ByteBufferUtil.writeWithShortLength(column.name.bytes, out);
-            }
-            else
-            {
-                out.writeInt(requestedRows.size() * columns.size());
-                for (Clustering clustering : requestedRows)
-                {
-                    for (ColumnDefinition column : columns)
-                        ByteBufferUtil.writeWithShortLength(LegacyLayout.encodeCellName(metadata, clustering, column.name.bytes, null), out);
-                }
-            }
-
-            // countCql3Rows should be false for Thrift and DISTINCT queries, and true otherwise
-            if (command.isForThrift() || (command.limits().kind() == DataLimits.Kind.CQL_LIMIT && command.limits().perPartitionCount() == 1))
-                out.writeBoolean(false);  // for Thrift (compact) or DISTINCT queries
-            else
-                out.writeBoolean(true);
-        }
-
-        static long serializedNamesFilterSize(ClusteringIndexNamesFilter filter, CFMetaData metadata, PartitionColumns fetchedColumns)
-        {
-            SortedSet<Clustering> requestedRows = filter.requestedRows();
-
-            long size = 0;
-            if (requestedRows.isEmpty())
-            {
-                // only static columns are requested
-                size += TypeSizes.sizeof(fetchedColumns.size());
-                for (ColumnDefinition column : fetchedColumns)
-                    size += ByteBufferUtil.serializedSizeWithShortLength(column.name.bytes);
-            }
-            else
-            {
-                size += TypeSizes.sizeof(requestedRows.size() * fetchedColumns.size());
-                for (Clustering clustering : requestedRows)
-                {
-                    for (ColumnDefinition column : fetchedColumns)
-                        size += ByteBufferUtil.serializedSizeWithShortLength(LegacyLayout.encodeCellName(metadata, clustering, column.name.bytes, null));
-                }
-            }
-
-            return size + TypeSizes.sizeof(true);  // countCql3Rows
-        }
-
-        private SinglePartitionReadCommand deserializeNamesCommand(DataInputPlus in, boolean isDigest, CFMetaData metadata, DecoratedKey key, int nowInSeconds, int version) throws IOException
-        {
-            Pair<ColumnFilter, ClusteringIndexNamesFilter> selectionAndFilter = deserializeNamesSelectionAndFilter(in, metadata);
-
-            // messages from old nodes will expect the thrift format, so always use 'true' for isForThrift
-            return new SinglePartitionReadCommand(
-                    isDigest, version, true, metadata, nowInSeconds, selectionAndFilter.left, RowFilter.NONE, DataLimits.NONE,
-                    key, selectionAndFilter.right);
-        }
-
-        static Pair<ColumnFilter, ClusteringIndexNamesFilter> deserializeNamesSelectionAndFilter(DataInputPlus in, CFMetaData metadata) throws IOException
-        {
-            int numCellNames = in.readInt();
-
-            // The names filter could include either a) static columns or b) normal columns with the clustering columns
-            // fully specified.  We need to handle those cases differently in 3.0.
-            NavigableSet<Clustering> clusterings = new TreeSet<>(metadata.comparator);
-
-            ColumnFilter.Builder selectionBuilder = ColumnFilter.selectionBuilder();
-            for (int i = 0; i < numCellNames; i++)
-            {
-                ByteBuffer buffer = ByteBufferUtil.readWithShortLength(in);
-                LegacyLayout.LegacyCellName cellName;
-                try
-                {
-                    cellName = LegacyLayout.decodeCellName(metadata, buffer);
-                }
-                catch (UnknownColumnException exc)
-                {
-                    // TODO this probably needs a new exception class that shares a parent with UnknownColumnFamilyException
-                    throw new UnknownColumnFamilyException(
-                            "Received legacy range read command with names filter for unrecognized column name. " +
-                                    "Fill name in filter (hex): " + ByteBufferUtil.bytesToHex(buffer), metadata.cfId);
-                }
-
-                // If we're querying for a static column, we may also need to read it
-                // as if it were a thrift dynamic column (because the column metadata,
-                // which makes it a static column in 3.0+, may have been added *after*
-                // some values were written). Note that all cql queries on non-compact
-                // tables used slice & not name filters prior to 3.0 so this path is
-                // not taken for non-compact tables. It is theoretically possible to
-                // get here via thrift, hence the check on metadata.isStaticCompactTable.
-                // See CASSANDRA-11087.
-                if (metadata.isStaticCompactTable() && cellName.clustering.equals(Clustering.STATIC_CLUSTERING))
-                {
-                    clusterings.add(Clustering.make(cellName.column.name.bytes));
-                    selectionBuilder.add(metadata.compactValueColumn());
-                }
-                else
-                {
-                    clusterings.add(cellName.clustering);
-                }
-
-                selectionBuilder.add(cellName.column);
-            }
-
-            // for compact storage tables without clustering keys, the column holding the selected value is named
-            // 'value' internally; we add it to the selection here to prevent errors due to unexpected column names
-            // when serializing the initial local data response
-            if (metadata.isStaticCompactTable() && clusterings.isEmpty())
-                selectionBuilder.addAll(metadata.partitionColumns());
-
-            in.readBoolean();  // countCql3Rows
-
-            // clusterings cannot include STATIC_CLUSTERING, so if the names filter is for static columns, clusterings
-            // will be empty.  However, by requesting the static columns in our ColumnFilter, this will still work.
-            ClusteringIndexNamesFilter filter = new ClusteringIndexNamesFilter(clusterings, false);
-            return Pair.create(selectionBuilder.build(), filter);
-        }
-
-        private long serializedNamesCommandSize(SinglePartitionReadCommand command)
-        {
-            ClusteringIndexNamesFilter filter = (ClusteringIndexNamesFilter)command.clusteringIndexFilter();
-            PartitionColumns columns = command.columnFilter().fetchedColumns();
-            return serializedNamesFilterSize(filter, command.metadata(), columns);
-        }
-
-        private void serializeSliceCommand(SinglePartitionReadCommand command, DataOutputPlus out) throws IOException
-        {
-            CFMetaData metadata = command.metadata();
-            ClusteringIndexSliceFilter filter = (ClusteringIndexSliceFilter)command.clusteringIndexFilter();
-
-            Slices slices = filter.requestedSlices();
-            boolean makeStaticSlice = !command.columnFilter().fetchedColumns().statics.isEmpty() && !slices.selects(Clustering.STATIC_CLUSTERING);
-            serializeSlices(out, slices, filter.isReversed(), makeStaticSlice, metadata);
-
-            out.writeBoolean(filter.isReversed());
-
-            boolean selectsStatics = !command.columnFilter().fetchedColumns().statics.isEmpty() || slices.selects(Clustering.STATIC_CLUSTERING);
-            DataLimits limits = command.limits();
-            if (limits.isDistinct())
-                out.writeInt(1);  // the limit is always 1 for DISTINCT queries
-            else
-                out.writeInt(updateLimitForQuery(command.limits().count(), filter.requestedSlices()));
-
-            int compositesToGroup;
-            if (limits.kind() == DataLimits.Kind.THRIFT_LIMIT || metadata.isDense())
-                compositesToGroup = -1;
-            else if (limits.isDistinct() && !selectsStatics)
-                compositesToGroup = -2;  // for DISTINCT queries (CASSANDRA-8490)
-            else
-                compositesToGroup = metadata.clusteringColumns().size();
-
-            out.writeInt(compositesToGroup);
-        }
-
-        private SinglePartitionReadCommand deserializeSliceCommand(DataInputPlus in, boolean isDigest, CFMetaData metadata, DecoratedKey key, int nowInSeconds, int version) throws IOException
-        {
-            Pair<ClusteringIndexSliceFilter, Boolean> p = deserializeSlicePartitionFilter(in, metadata);
-            ClusteringIndexSliceFilter filter = p.left;
-            boolean selectsStatics = p.right;
-            int count = in.readInt();
-            int compositesToGroup = in.readInt();
-
-            // if a slice query from a pre-3.0 node doesn't cover statics, we shouldn't select them at all
-            ColumnFilter columnFilter = LegacyRangeSliceCommandSerializer.getColumnSelectionForSlice(selectsStatics, compositesToGroup, metadata);
-
-            // We have 2 types of DISTINCT queries: ones on only the partition key, and ones on the partition key and static columns. For the former,
-            // we can easily detect the case because compositesToGroup is -2 and that's the only case in which it can be. The latter is problematic,
-            // however, as we have no way to distinguish it from a normal select with a limit of 1 (unlike the range query case,
-            // where the countCQL3Rows boolean allows us to decide).
-            // So we consider this case not distinct here. This is ok because even if it is a distinct (with static), the count will be 1 and
-            // we'll still just query one row (a distinct DataLimits currently behaves exactly like a CQL limit with a count of 1). The only
-            // drawback is that we'll send back the first row entirely while a 2.1/2.2 node would return only the first cell in that same
-            // situation. This isn't a problem for 2.1/2.2 code however (it would be for a range query, as it would throw off the count for
-            // reasons similar to CASSANDRA-10762, but it's ok for single partition queries).
-            // We do _not_ want to do the reverse however and consider a 'SELECT * FROM foo LIMIT 1' as a DISTINCT query as that would make
-            // us only return the 1st cell rather than the 1st row.
-            DataLimits limits;
-            if (compositesToGroup == -2)
-                limits = DataLimits.distinctLimits(count);  // See CASSANDRA-8490 for the explanation of this value
-            else if (compositesToGroup == -1)
-                limits = DataLimits.thriftLimits(1, count);
-            else
-                limits = DataLimits.cqlLimits(count);
-
-            // messages from old nodes will expect the thrift format, so always use 'true' for isForThrift
-            return new SinglePartitionReadCommand(isDigest, version, true, metadata, nowInSeconds, columnFilter, RowFilter.NONE, limits, key, filter);
-        }
-
-        private long serializedSliceCommandSize(SinglePartitionReadCommand command)
-        {
-            CFMetaData metadata = command.metadata();
-            ClusteringIndexSliceFilter filter = (ClusteringIndexSliceFilter)command.clusteringIndexFilter();
-
-            Slices slices = filter.requestedSlices();
-            boolean makeStaticSlice = !command.columnFilter().fetchedColumns().statics.isEmpty() && !slices.selects(Clustering.STATIC_CLUSTERING);
-
-            long size = serializedSlicesSize(slices, makeStaticSlice, metadata);
-            size += TypeSizes.sizeof(command.clusteringIndexFilter().isReversed());
-            size += TypeSizes.sizeof(command.limits().count());
-            return size + TypeSizes.sizeof(0);  // compositesToGroup
-        }
-
-        static void serializeSlices(DataOutputPlus out, Slices slices, boolean isReversed, boolean makeStaticSlice, CFMetaData metadata) throws IOException
-        {
-            out.writeInt(slices.size() + (makeStaticSlice ? 1 : 0));
-
-            // In 3.0 we always store the slices in normal comparator order.  Pre-3.0 nodes expect the slices to
-            // be in reversed order if the query is reversed, so we handle that here.
-            if (isReversed)
-            {
-                for (int i = slices.size() - 1; i >= 0; i--)
-                    serializeSlice(out, slices.get(i), true, metadata);
-                if (makeStaticSlice)
-                    serializeStaticSlice(out, true, metadata);
-            }
-            else
-            {
-                if (makeStaticSlice)
-                    serializeStaticSlice(out, false, metadata);
-                for (Slice slice : slices)
-                    serializeSlice(out, slice, false, metadata);
-            }
-        }
-
-        static long serializedSlicesSize(Slices slices, boolean makeStaticSlice, CFMetaData metadata)
-        {
-            long size = TypeSizes.sizeof(slices.size());
-
-            for (Slice slice : slices)
-            {
-                ByteBuffer sliceStart = LegacyLayout.encodeBound(metadata, slice.start(), true);
-                size += ByteBufferUtil.serializedSizeWithShortLength(sliceStart);
-                ByteBuffer sliceEnd = LegacyLayout.encodeBound(metadata, slice.end(), false);
-                size += ByteBufferUtil.serializedSizeWithShortLength(sliceEnd);
-            }
-
-            if (makeStaticSlice)
-                size += serializedStaticSliceSize(metadata);
-
-            return size;
-        }
-
-        static long serializedStaticSliceSize(CFMetaData metadata)
-        {
-            // like serializeStaticSlice(), but we don't care about reversal for size calculations
-            ByteBuffer sliceStart = LegacyLayout.encodeBound(metadata, ClusteringBound.BOTTOM, false);
-            long size = ByteBufferUtil.serializedSizeWithShortLength(sliceStart);
-
-            size += TypeSizes.sizeof((short) (metadata.comparator.size() * 3 + 2));
-            size += TypeSizes.sizeof((short) LegacyLayout.STATIC_PREFIX);
-            for (int i = 0; i < metadata.comparator.size(); i++)
-            {
-                size += ByteBufferUtil.serializedSizeWithShortLength(ByteBufferUtil.EMPTY_BYTE_BUFFER);
-                size += 1;  // EOC
-            }
-            return size;
-        }
-
-        private static void serializeSlice(DataOutputPlus out, Slice slice, boolean isReversed, CFMetaData metadata) throws IOException
-        {
-            ByteBuffer sliceStart = LegacyLayout.encodeBound(metadata, isReversed ? slice.end() : slice.start(), !isReversed);
-            ByteBufferUtil.writeWithShortLength(sliceStart, out);
-
-            ByteBuffer sliceEnd = LegacyLayout.encodeBound(metadata, isReversed ? slice.start() : slice.end(), isReversed);
-            ByteBufferUtil.writeWithShortLength(sliceEnd, out);
-        }
-
-        private static void serializeStaticSlice(DataOutputPlus out, boolean isReversed, CFMetaData metadata) throws IOException
-        {
-            // if not reversed, write an empty bound for the slice start; if reversed, write an empty bound for the
-            // slice finish after we've written the static slice start
-            if (!isReversed)
-            {
-                ByteBuffer sliceStart = LegacyLayout.encodeBound(metadata, ClusteringBound.BOTTOM, false);
-                ByteBufferUtil.writeWithShortLength(sliceStart, out);
-            }
-
-            // write out the length of the composite
-            out.writeShort(2 + metadata.comparator.size() * 3);  // two bytes + EOC for each component, plus static prefix
-            out.writeShort(LegacyLayout.STATIC_PREFIX);
-            for (int i = 0; i < metadata.comparator.size(); i++)
-            {
-                ByteBufferUtil.writeWithShortLength(ByteBufferUtil.EMPTY_BYTE_BUFFER, out);
-                // write the EOC, using an inclusive end if we're on the final component
-                out.writeByte(i == metadata.comparator.size() - 1 ? 1 : 0);
-            }
-
-            if (isReversed)
-            {
-                ByteBuffer sliceStart = LegacyLayout.encodeBound(metadata, ClusteringBound.BOTTOM, false);
-                ByteBufferUtil.writeWithShortLength(sliceStart, out);
-            }
-        }
-
-        // Returns the deserialized filter, and whether static columns are queried (in pre-3.0, both pieces of information are determined
-        // by the slices, but in 3.0 they are separated: whether static columns are queried or not depends on the ColumnFilter).
-        static Pair<ClusteringIndexSliceFilter, Boolean> deserializeSlicePartitionFilter(DataInputPlus in, CFMetaData metadata) throws IOException
-        {
-            int numSlices = in.readInt();
-            ByteBuffer[] startBuffers = new ByteBuffer[numSlices];
-            ByteBuffer[] finishBuffers = new ByteBuffer[numSlices];
-            for (int i = 0; i < numSlices; i++)
-            {
-                startBuffers[i] = ByteBufferUtil.readWithShortLength(in);
-                finishBuffers[i] = ByteBufferUtil.readWithShortLength(in);
-            }
-
-            boolean reversed = in.readBoolean();
-
-            if (reversed)
-            {
-                // pre-3.0, reversed query slices put the greater element at the start of the slice
-                ByteBuffer[] tmp = finishBuffers;
-                finishBuffers = startBuffers;
-                startBuffers = tmp;
-            }
-
-            boolean selectsStatics = false;
-            Slices.Builder slicesBuilder = new Slices.Builder(metadata.comparator);
-            for (int i = 0; i < numSlices; i++)
-            {
-                LegacyLayout.LegacyBound start = LegacyLayout.decodeBound(metadata, startBuffers[i], true);
-                LegacyLayout.LegacyBound finish = LegacyLayout.decodeBound(metadata, finishBuffers[i], false);
-
-                if (start.isStatic)
-                {
-                    // If we start at the static block, this means we start at the beginning of the partition in 3.0
-                    // terms (since 3.0 handles static outside of the slice).
-                    start = LegacyLayout.LegacyBound.BOTTOM;
-
-                    // Then, if the statics are included, record it
-                    if (start.bound.isInclusive())
-                        selectsStatics = true;
-                }
-                else if (start == LegacyLayout.LegacyBound.BOTTOM)
-                {
-                    selectsStatics = true;
-                }
-
-                // If the end of the slice is the end of the statics, then that means this slice was just selecting static
-                // columns. We have already recorded that in selectsStatics, so we can ignore the slice (which doesn't make
-                // sense for 3.0).
-                if (finish.isStatic)
-                {
-                    assert finish.bound.isInclusive(); // it would make no sense for a pre-3.0 node to have a slice that stops
-                                                     // before the static columns (since there is nothing before that)
-                    continue;
-                }
-
-                slicesBuilder.add(Slice.make(start.bound, finish.bound));
-            }
-
-            return Pair.create(new ClusteringIndexSliceFilter(slicesBuilder.build(), reversed), selectsStatics);
-        }
-
-        private static SinglePartitionReadCommand maybeConvertNamesToSlice(SinglePartitionReadCommand command)
-        {
-            if (command.clusteringIndexFilter().kind() != ClusteringIndexFilter.Kind.NAMES)
-                return command;
-
-            CFMetaData metadata = command.metadata();
-
-            if (!shouldConvertNamesToSlice(metadata, command.columnFilter().fetchedColumns()))
-                return command;
-
-            ClusteringIndexNamesFilter filter = (ClusteringIndexNamesFilter)command.clusteringIndexFilter();
-            ClusteringIndexSliceFilter sliceFilter = convertNamesFilterToSliceFilter(filter, metadata);
-            return new SinglePartitionReadCommand(
-                    command.isDigestQuery(), command.digestVersion(), command.isForThrift(), metadata, command.nowInSec(),
-                    command.columnFilter(), command.rowFilter(), command.limits(), command.partitionKey(), sliceFilter);
-        }
-
-        /**
-         * Returns true if a names filter on the given table and column selection should be converted to a slice
-         * filter for compatibility with pre-3.0 nodes, false otherwise.
-         */
-        static boolean shouldConvertNamesToSlice(CFMetaData metadata, PartitionColumns columns)
-        {
-            // On pre-3.0 nodes, due to CASSANDRA-5762, we always do a slice for CQL3 tables (not dense, composite).
-            if (!metadata.isDense() && metadata.isCompound())
-                return true;
-
-            // pre-3.0 nodes don't support names filters for reading collections, so if we're requesting any of those,
-            // we need to convert this to a slice filter
-            for (ColumnDefinition column : columns)
-            {
-                if (column.type.isMultiCell())
-                    return true;
-            }
-            return false;
-        }
-
-        /**
-         * Converts a names filter that is incompatible with pre-3.0 nodes to a slice filter that is compatible.
-         */
-        private static ClusteringIndexSliceFilter convertNamesFilterToSliceFilter(ClusteringIndexNamesFilter filter, CFMetaData metadata)
-        {
-            SortedSet<Clustering> requestedRows = filter.requestedRows();
-            Slices slices;
-            if (requestedRows.isEmpty())
-            {
-                slices = Slices.NONE;
-            }
-            else if (requestedRows.size() == 1 && requestedRows.first().size() == 0)
-            {
-                slices = Slices.ALL;
-            }
-            else
-            {
-                Slices.Builder slicesBuilder = new Slices.Builder(metadata.comparator);
-                for (Clustering clustering : requestedRows)
-                    slicesBuilder.add(ClusteringBound.inclusiveStartOf(clustering), ClusteringBound.inclusiveEndOf(clustering));
-                slices = slicesBuilder.build();
-            }
-
-            return new ClusteringIndexSliceFilter(slices, filter.isReversed());
-        }
-
-        /**
-         * Potentially increases the existing query limit to account for the lack of exclusive bounds in pre-3.0 nodes.
-         * @param limit the existing query limit
-         * @param slices the requested slices
-         * @return the updated limit
-         */
-        static int updateLimitForQuery(int limit, Slices slices)
-        {
-            // Pre-3.0 nodes don't support exclusive bounds for slices. Instead, we query one more element if necessary
-            // and filter it later (in LegacyRemoteDataResponse)
-            if (!slices.hasLowerBound() && !slices.hasUpperBound())
-                return limit;
-
-            for (Slice slice : slices)
-            {
-                if (limit == Integer.MAX_VALUE)
-                    return limit;
-
-                if (!slice.start().isInclusive())
-                    limit++;
-                if (!slice.end().isInclusive())
-                    limit++;
-            }
-            return limit;
-        }
-    }
 }

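For readers tracing the removed ReadCommand logic above: pre-3.0 nodes could not serve a names filter on compound non-dense tables (CASSANDRA-5762) or on multi-cell columns, so each requested clustering was widened to an inclusive slice; and because those nodes also lacked exclusive slice bounds, the row limit was raised by one per exclusive bound so the surplus rows could be trimmed out of the response afterwards. A minimal, self-contained sketch of that limit adjustment follows — Slice here is a hypothetical stand-in, not the real org.apache.cassandra.db.Slice:

    import java.util.List;

    public class LegacyLimitSketch
    {
        // Hypothetical stand-in for the real Slice/ClusteringBound pair.
        record Slice(boolean startInclusive, boolean endInclusive) {}

        // Mirrors the removed updateLimitForQuery: pre-3.0 nodes cannot express
        // exclusive bounds, so one extra row is fetched per exclusive bound and
        // the surplus is trimmed once the response arrives.
        static int updateLimitForQuery(int limit, List<Slice> slices)
        {
            for (Slice slice : slices)
            {
                if (limit == Integer.MAX_VALUE)
                    return limit;
                if (!slice.startInclusive())
                    limit++;
                if (!slice.endInclusive())
                    limit++;
            }
            return limit;
        }

        public static void main(String[] args)
        {
            // One slice with an exclusive start and an exclusive end: 10 -> 12.
            System.out.println(updateLimitForQuery(10, List.of(new Slice(false, false))));
        }
    }

With limit 10 and a single slice that excludes both of its bounds, main prints 12: one extra row per exclusive bound.
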
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/ReadResponse.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/ReadResponse.java b/src/java/org/apache/cassandra/db/ReadResponse.java
index cca21f8..c3eae0d 100644
--- a/src/java/org/apache/cassandra/db/ReadResponse.java
+++ b/src/java/org/apache/cassandra/db/ReadResponse.java
@@ -32,7 +32,6 @@ import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.rows.*;
 import org.apache.cassandra.db.partitions.*;
 import org.apache.cassandra.dht.*;
-import org.apache.cassandra.io.ForwardingVersionedSerializer;
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.util.DataInputBuffer;
 import org.apache.cassandra.io.util.DataInputPlus;
@@ -47,20 +46,6 @@ public abstract class ReadResponse
 {
     // Serializer for single partition read response
     public static final IVersionedSerializer<ReadResponse> serializer = new Serializer();
-    // Serializer for the pre-3.0 range slice responses.
-    public static final IVersionedSerializer<ReadResponse> legacyRangeSliceReplySerializer = new LegacyRangeSliceReplySerializer();
-    // Serializer for partition range read response (this actually delegates to 'serializer' in 3.0 and to
-    // 'legacyRangeSliceReplySerializer' in older versions).
-    public static final IVersionedSerializer<ReadResponse> rangeSliceSerializer = new ForwardingVersionedSerializer<ReadResponse>()
-    {
-        @Override
-        protected IVersionedSerializer<ReadResponse> delegate(int version)
-        {
-            return version < MessagingService.VERSION_30
-                    ? legacyRangeSliceReplySerializer
-                    : serializer;
-        }
-    };
 
     // This is used only when serializing data responses and we can't do it easily in other cases. So this can be null, which is slightly
     // hacky, but as this hack doesn't escape this class, and it's easy enough to validate that it's not null when we need it, it's "good enough".
@@ -95,7 +80,7 @@ public abstract class ReadResponse
     protected static ByteBuffer makeDigest(UnfilteredPartitionIterator iterator, ReadCommand command)
     {
         MessageDigest digest = FBUtilities.threadLocalMD5Digest();
-        UnfilteredPartitionIterators.digest(command, iterator, digest, command.digestVersion());
+        UnfilteredPartitionIterators.digest(iterator, digest, command.digestVersion());
         return ByteBuffer.wrap(digest.digest());
     }
 
@@ -210,130 +195,12 @@ public abstract class ReadResponse
         }
     }
 
-    /**
-     * A remote response from a pre-3.0 node.  This needs a separate class in order to cleanly handle trimming and
-     * reversal of results when the read command calls for it.  Pre-3.0 nodes always return results in the normal
-     * sorted order, even if the query asks for reversed results.  Additionally, pre-3.0 nodes do not have a notion of
-     * exclusive slices on non-composite tables, so extra rows may need to be trimmed.
-     */
-    @VisibleForTesting
-    static class LegacyRemoteDataResponse extends ReadResponse
-    {
-        private final List<ImmutableBTreePartition> partitions;
-
-        @VisibleForTesting
-        LegacyRemoteDataResponse(List<ImmutableBTreePartition> partitions)
-        {
-            super(null); // we never serialize LegacyRemoteDataResponses, so we don't care about the command
-            this.partitions = partitions;
-        }
-
-        public UnfilteredPartitionIterator makeIterator(final ReadCommand command)
-        {
-            // Due to a bug in the serialization of AbstractBounds, anything that isn't a Range is understood by pre-3.0 nodes
-            // as a Bound, which means IncludingExcludingBounds and ExcludingBounds responses may include keys they shouldn't.
-            // So filter partitions that shouldn't be included here.
-            boolean skipFirst = false;
-            boolean skipLast = false;
-            if (!partitions.isEmpty() && command instanceof PartitionRangeReadCommand)
-            {
-                AbstractBounds<PartitionPosition> keyRange = ((PartitionRangeReadCommand)command).dataRange().keyRange();
-                boolean isExcludingBounds = keyRange instanceof ExcludingBounds;
-                skipFirst = isExcludingBounds && !keyRange.contains(partitions.get(0).partitionKey());
-                skipLast = (isExcludingBounds || keyRange instanceof IncludingExcludingBounds) && !keyRange.contains(partitions.get(partitions.size() - 1).partitionKey());
-            }
-
-            final List<ImmutableBTreePartition> toReturn;
-            if (skipFirst || skipLast)
-            {
-                toReturn = partitions.size() == 1
-                         ? Collections.emptyList()
-                         : partitions.subList(skipFirst ? 1 : 0, skipLast ? partitions.size() - 1 : partitions.size());
-            }
-            else
-            {
-                toReturn = partitions;
-            }
-
-            return new AbstractUnfilteredPartitionIterator()
-            {
-                private int idx;
-
-                public boolean isForThrift()
-                {
-                    return true;
-                }
-
-                public CFMetaData metadata()
-                {
-                    return command.metadata();
-                }
-
-                public boolean hasNext()
-                {
-                    return idx < toReturn.size();
-                }
-
-                public UnfilteredRowIterator next()
-                {
-                    ImmutableBTreePartition partition = toReturn.get(idx++);
-
-                    ClusteringIndexFilter filter = command.clusteringIndexFilter(partition.partitionKey());
-
-                    // Pre-3.0, we would always request one more row than we actually needed and the command-level "start" would
-                    // be the last-returned cell name, so the response would always include it.
-                    UnfilteredRowIterator iterator = partition.unfilteredIterator(command.columnFilter(), filter.getSlices(command.metadata()), filter.isReversed());
-
-                    // Wrap results with a ThriftResultMerger only if they're intended for the thrift command.
-                    if (command.isForThrift())
-                        return ThriftResultsMerger.maybeWrap(iterator, command.nowInSec());
-                    else
-                        return iterator;
-                }
-            };
-        }
-
-        public ByteBuffer digest(ReadCommand command)
-        {
-            try (UnfilteredPartitionIterator iterator = makeIterator(command))
-            {
-                return makeDigest(iterator, command);
-            }
-        }
-
-        public boolean isDigestResponse()
-        {
-            return false;
-        }
-    }
-
     private static class Serializer implements IVersionedSerializer<ReadResponse>
     {
         public void serialize(ReadResponse response, DataOutputPlus out, int version) throws IOException
         {
             boolean isDigest = response instanceof DigestResponse;
             ByteBuffer digest = isDigest ? ((DigestResponse)response).digest : ByteBufferUtil.EMPTY_BYTE_BUFFER;
-            if (version < MessagingService.VERSION_30)
-            {
-                out.writeInt(digest.remaining());
-                out.write(digest);
-                out.writeBoolean(isDigest);
-                if (!isDigest)
-                {
-                    assert response.command != null; // we only serialize LocalDataResponse, which always has the command set
-                    try (UnfilteredPartitionIterator iter = response.makeIterator(response.command))
-                    {
-                        assert iter.hasNext();
-                        try (UnfilteredRowIterator partition = iter.next())
-                        {
-                            ByteBufferUtil.writeWithShortLength(partition.partitionKey().getKey(), out);
-                            LegacyLayout.serializeAsLegacyPartition(response.command, partition, out, version);
-                        }
-                        assert !iter.hasNext();
-                    }
-                }
-                return;
-            }
 
             ByteBufferUtil.writeWithVIntLength(digest, out);
             if (!isDigest)
@@ -345,38 +212,12 @@ public abstract class ReadResponse
 
         public ReadResponse deserialize(DataInputPlus in, int version) throws IOException
         {
-            if (version < MessagingService.VERSION_30)
-            {
-                byte[] digest = null;
-                int digestSize = in.readInt();
-                if (digestSize > 0)
-                {
-                    digest = new byte[digestSize];
-                    in.readFully(digest, 0, digestSize);
-                }
-                boolean isDigest = in.readBoolean();
-                assert isDigest == digestSize > 0;
-                if (isDigest)
-                {
-                    assert digest != null;
-                    return new DigestResponse(ByteBuffer.wrap(digest));
-                }
-
-                // ReadResponses from older versions are always single-partition (ranges are handled by RangeSliceReply)
-                ByteBuffer key = ByteBufferUtil.readWithShortLength(in);
-                try (UnfilteredRowIterator rowIterator = LegacyLayout.deserializeLegacyPartition(in, version, SerializationHelper.Flag.FROM_REMOTE, key))
-                {
-                    if (rowIterator == null)
-                        return new LegacyRemoteDataResponse(Collections.emptyList());
-
-                    return new LegacyRemoteDataResponse(Collections.singletonList(ImmutableBTreePartition.create(rowIterator)));
-                }
-            }
-
             ByteBuffer digest = ByteBufferUtil.readWithVIntLength(in);
             if (digest.hasRemaining())
                 return new DigestResponse(digest);
 
+            // Note that we can only get here if version == 3.0, which is the current_version. When we change the
+            // version, we'll have to deserialize/re-serialize the data to be in the proper version.
             assert version == MessagingService.VERSION_30;
             ByteBuffer data = ByteBufferUtil.readWithVIntLength(in);
             return new RemoteDataResponse(data);
@@ -387,28 +228,6 @@ public abstract class ReadResponse
             boolean isDigest = response instanceof DigestResponse;
             ByteBuffer digest = isDigest ? ((DigestResponse)response).digest : ByteBufferUtil.EMPTY_BYTE_BUFFER;
 
-            if (version < MessagingService.VERSION_30)
-            {
-                long size = TypeSizes.sizeof(digest.remaining())
-                        + digest.remaining()
-                        + TypeSizes.sizeof(isDigest);
-                if (!isDigest)
-                {
-                    assert response.command != null; // we only serialize LocalDataResponse, which always has the command set
-                    try (UnfilteredPartitionIterator iter = response.makeIterator(response.command))
-                    {
-                        assert iter.hasNext();
-                        try (UnfilteredRowIterator partition = iter.next())
-                        {
-                            size += ByteBufferUtil.serializedSizeWithShortLength(partition.partitionKey().getKey());
-                            size += LegacyLayout.serializedSizeAsLegacyPartition(response.command, partition, version);
-                        }
-                        assert !iter.hasNext();
-                    }
-                }
-                return size;
-            }
-
             long size = ByteBufferUtil.serializedSizeWithVIntLength(digest);
             if (!isDigest)
             {
@@ -421,81 +240,4 @@ public abstract class ReadResponse
             return size;
         }
     }
-
-    private static class LegacyRangeSliceReplySerializer implements IVersionedSerializer<ReadResponse>
-    {
-        public void serialize(ReadResponse response, DataOutputPlus out, int version) throws IOException
-        {
-            assert version < MessagingService.VERSION_30;
-
-            // determine the number of partitions upfront for serialization
-            int numPartitions = 0;
-            assert response.command != null; // we only serialize LocalDataResponse, which always has the command set
-            try (UnfilteredPartitionIterator iterator = response.makeIterator(response.command))
-            {
-                while (iterator.hasNext())
-                {
-                    try (UnfilteredRowIterator atomIterator = iterator.next())
-                    {
-                        numPartitions++;
-
-                        // we have to fully exhaust the subiterator
-                        while (atomIterator.hasNext())
-                            atomIterator.next();
-                    }
-                }
-            }
-
-            out.writeInt(numPartitions);
-
-            try (UnfilteredPartitionIterator iterator = response.makeIterator(response.command))
-            {
-                while (iterator.hasNext())
-                {
-                    try (UnfilteredRowIterator partition = iterator.next())
-                    {
-                        ByteBufferUtil.writeWithShortLength(partition.partitionKey().getKey(), out);
-                        LegacyLayout.serializeAsLegacyPartition(response.command, partition, out, version);
-                    }
-                }
-            }
-        }
-
-        public ReadResponse deserialize(DataInputPlus in, int version) throws IOException
-        {
-            assert version < MessagingService.VERSION_30;
-
-            int partitionCount = in.readInt();
-            ArrayList<ImmutableBTreePartition> partitions = new ArrayList<>(partitionCount);
-            for (int i = 0; i < partitionCount; i++)
-            {
-                ByteBuffer key = ByteBufferUtil.readWithShortLength(in);
-                try (UnfilteredRowIterator partition = LegacyLayout.deserializeLegacyPartition(in, version, SerializationHelper.Flag.FROM_REMOTE, key))
-                {
-                    partitions.add(ImmutableBTreePartition.create(partition));
-                }
-            }
-            return new LegacyRemoteDataResponse(partitions);
-        }
-
-        public long serializedSize(ReadResponse response, int version)
-        {
-            assert version < MessagingService.VERSION_30;
-            long size = TypeSizes.sizeof(0);  // number of partitions
-
-            assert response.command != null; // we only serialize LocalDataResponse, which always has the command set
-            try (UnfilteredPartitionIterator iterator = response.makeIterator(response.command))
-            {
-                while (iterator.hasNext())
-                {
-                    try (UnfilteredRowIterator partition = iterator.next())
-                    {
-                        size += ByteBufferUtil.serializedSizeWithShortLength(partition.partitionKey().getKey());
-                        size += LegacyLayout.serializedSizeAsLegacyPartition(response.command, partition, version);
-                    }
-                }
-            }
-            return size;
-        }
-    }
 }

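With the legacy serializers gone, ReadResponse is left with a single wire shape: a vint-length-prefixed digest, where a non-empty digest marks a digest response and an empty digest means serialized partition data follows (mirroring the digest.hasRemaining() check in deserialize above). A rough standalone sketch of that framing — writeWithVIntLength here is a stand-in for the ByteBufferUtil helper, and plain LEB128 is used for illustration rather than Cassandra's actual vint encoding:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class ResponseFramingSketch
    {
        // Stand-in for ByteBufferUtil.writeWithVIntLength: an unsigned varint
        // length prefix followed by the raw bytes. LEB128 is used here only
        // for illustration; Cassandra's real vint encoding is different.
        static void writeWithVIntLength(byte[] bytes, DataOutputStream out) throws IOException
        {
            long v = bytes.length;
            while ((v & ~0x7FL) != 0)
            {
                out.writeByte((int) ((v & 0x7F) | 0x80));
                v >>>= 7;
            }
            out.writeByte((int) v);
            out.write(bytes);
        }

        // Mirrors the surviving serializer's shape: a non-empty digest marks a
        // digest response; an empty digest means partition data follows.
        static byte[] serialize(byte[] digest, byte[] data) throws IOException
        {
            ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            try (DataOutputStream out = new DataOutputStream(buffer))
            {
                boolean isDigest = digest.length > 0;
                writeWithVIntLength(digest, out);
                if (!isDigest)
                    writeWithVIntLength(data, out);
            }
            return buffer.toByteArray();
        }
    }

A matching deserializer would read the digest length first: anything non-zero is a digest response, and a zero-length digest signals that a data payload follows.
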
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/RowIndexEntry.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/RowIndexEntry.java b/src/java/org/apache/cassandra/db/RowIndexEntry.java
index e620dc0..a709ec3 100644
--- a/src/java/org/apache/cassandra/db/RowIndexEntry.java
+++ b/src/java/org/apache/cassandra/db/RowIndexEntry.java
@@ -253,7 +253,7 @@ public class RowIndexEntry<T> implements IMeasurableMemory
 
         public Serializer(CFMetaData metadata, Version version, SerializationHeader header)
         {
-            this.idxInfoSerializer = metadata.serializers().indexInfoSerializer(version, header);
+            this.idxInfoSerializer = IndexInfo.serializer(version, header);
             this.version = version;
         }
 
@@ -264,22 +264,16 @@ public class RowIndexEntry<T> implements IMeasurableMemory
 
         public void serialize(RowIndexEntry<IndexInfo> rie, DataOutputPlus out, ByteBuffer indexInfo) throws IOException
         {
-            assert version.storeRows() : "We read old index files but we should never write them";
-
             rie.serialize(out, idxInfoSerializer, indexInfo);
         }
 
         public void serializeForCache(RowIndexEntry<IndexInfo> rie, DataOutputPlus out) throws IOException
         {
-            assert version.storeRows();
-
             rie.serializeForCache(out);
         }
 
         public RowIndexEntry<IndexInfo> deserializeForCache(DataInputPlus in) throws IOException
         {
-            assert version.storeRows();
-
             long position = in.readUnsignedVInt();
 
             switch (in.readByte())
@@ -297,8 +291,6 @@ public class RowIndexEntry<T> implements IMeasurableMemory
 
         public static void skipForCache(DataInputPlus in, Version version) throws IOException
         {
-            assert version.storeRows();
-
             /* long position = */in.readUnsignedVInt();
             switch (in.readByte())
             {
@@ -317,9 +309,6 @@ public class RowIndexEntry<T> implements IMeasurableMemory
 
         public RowIndexEntry<IndexInfo> deserialize(DataInputPlus in, long indexFilePosition) throws IOException
         {
-            if (!version.storeRows())
-                return LegacyShallowIndexedEntry.deserialize(in, indexFilePosition, idxInfoSerializer);
-
             long position = in.readUnsignedVInt();
 
             int size = (int)in.readUnsignedVInt();
@@ -354,9 +343,6 @@ public class RowIndexEntry<T> implements IMeasurableMemory
 
         public long deserializePositionAndSkip(DataInputPlus in) throws IOException
         {
-            if (!version.storeRows())
-                return LegacyShallowIndexedEntry.deserializePositionAndSkip(in);
-
             return ShallowIndexedEntry.deserializePositionAndSkip(in);
         }
 
@@ -367,7 +353,7 @@ public class RowIndexEntry<T> implements IMeasurableMemory
          */
         public static long readPosition(DataInputPlus in, Version version) throws IOException
         {
-            return version.storeRows() ? in.readUnsignedVInt() : in.readLong();
+            return in.readUnsignedVInt();
         }
 
         public static void skip(DataInputPlus in, Version version) throws IOException
@@ -378,7 +364,7 @@ public class RowIndexEntry<T> implements IMeasurableMemory
 
         private static void skipPromotedIndex(DataInputPlus in, Version version) throws IOException
         {
-            int size = version.storeRows() ? (int)in.readUnsignedVInt() : in.readInt();
+            int size = (int)in.readUnsignedVInt();
             if (size <= 0)
                 return;
 
@@ -413,164 +399,6 @@ public class RowIndexEntry<T> implements IMeasurableMemory
         out.writeByte(CACHE_NOT_INDEXED);
     }
 
-    private static final class LegacyShallowIndexedEntry extends RowIndexEntry<IndexInfo>
-    {
-        private static final long BASE_SIZE;
-        static
-        {
-            BASE_SIZE = ObjectSizes.measure(new LegacyShallowIndexedEntry(0, 0, DeletionTime.LIVE, 0, new int[0], null, 0));
-        }
-
-        private final long indexFilePosition;
-        private final int[] offsets;
-        @Unmetered
-        private final IndexInfo.Serializer idxInfoSerializer;
-        private final DeletionTime deletionTime;
-        private final long headerLength;
-        private final int serializedSize;
-
-        private LegacyShallowIndexedEntry(long dataFilePosition, long indexFilePosition,
-                                          DeletionTime deletionTime, long headerLength,
-                                          int[] offsets, IndexInfo.Serializer idxInfoSerializer,
-                                          int serializedSize)
-        {
-            super(dataFilePosition);
-            this.deletionTime = deletionTime;
-            this.headerLength = headerLength;
-            this.indexFilePosition = indexFilePosition;
-            this.offsets = offsets;
-            this.idxInfoSerializer = idxInfoSerializer;
-            this.serializedSize = serializedSize;
-        }
-
-        @Override
-        public DeletionTime deletionTime()
-        {
-            return deletionTime;
-        }
-
-        @Override
-        public long headerLength()
-        {
-            return headerLength;
-        }
-
-        @Override
-        public long unsharedHeapSize()
-        {
-            return BASE_SIZE + offsets.length * TypeSizes.sizeof(0);
-        }
-
-        @Override
-        public int columnsIndexCount()
-        {
-            return offsets.length;
-        }
-
-        @Override
-        public void serialize(DataOutputPlus out, IndexInfo.Serializer idxInfoSerializer, ByteBuffer indexInfo)
-        {
-            throw new UnsupportedOperationException("serializing legacy index entries is not supported");
-        }
-
-        @Override
-        public void serializeForCache(DataOutputPlus out)
-        {
-            throw new UnsupportedOperationException("serializing legacy index entries is not supported");
-        }
-
-        @Override
-        public IndexInfoRetriever openWithIndex(FileHandle indexFile)
-        {
-            int fieldsSize = (int) DeletionTime.serializer.serializedSize(deletionTime)
-                             + TypeSizes.sizeof(0); // columnIndexCount
-            indexEntrySizeHistogram.update(serializedSize);
-            indexInfoCountHistogram.update(offsets.length);
-            return new LegacyIndexInfoRetriever(indexFilePosition +
-                                                TypeSizes.sizeof(0L) + // position
-                                                TypeSizes.sizeof(0) + // indexInfoSize
-                                                fieldsSize,
-                                                offsets, indexFile.createReader(), idxInfoSerializer);
-        }
-
-        public static RowIndexEntry<IndexInfo> deserialize(DataInputPlus in, long indexFilePosition,
-                                                IndexInfo.Serializer idxInfoSerializer) throws IOException
-        {
-            long dataFilePosition = in.readLong();
-
-            int size = in.readInt();
-            if (size == 0)
-            {
-                return new RowIndexEntry<>(dataFilePosition);
-            }
-            else if (size <= DatabaseDescriptor.getColumnIndexCacheSize())
-            {
-                return new IndexedEntry(dataFilePosition, in, idxInfoSerializer);
-            }
-            else
-            {
-                DeletionTime deletionTime = DeletionTime.serializer.deserialize(in);
-
-                // For legacy sstables (i.e. sstables pre-"ma", pre-3.0) we have to scan all serialized IndexInfo
-                // objects to calculate the offsets array. However, it might be possible to deserialize all
-                // IndexInfo objects here - but just skipping is gentler on the heap/GC.
-
-                int entries = in.readInt();
-                int[] offsets = new int[entries];
-
-                TrackedDataInputPlus tracked = new TrackedDataInputPlus(in);
-                long start = tracked.getBytesRead();
-                long headerLength = 0L;
-                for (int i = 0; i < entries; i++)
-                {
-                    offsets[i] = (int) (tracked.getBytesRead() - start);
-                    if (i == 0)
-                    {
-                        IndexInfo info = idxInfoSerializer.deserialize(tracked);
-                        headerLength = info.offset;
-                    }
-                    else
-                        idxInfoSerializer.skip(tracked);
-                }
-
-                return new LegacyShallowIndexedEntry(dataFilePosition, indexFilePosition, deletionTime, headerLength, offsets, idxInfoSerializer, size);
-            }
-        }
-
-        static long deserializePositionAndSkip(DataInputPlus in) throws IOException
-        {
-            long position = in.readLong();
-
-            int size = in.readInt();
-            if (size > 0)
-                in.skipBytesFully(size);
-
-            return position;
-        }
-    }
-
-    private static final class LegacyIndexInfoRetriever extends FileIndexInfoRetriever
-    {
-        private final int[] offsets;
-
-        private LegacyIndexInfoRetriever(long indexFilePosition, int[] offsets, FileDataInput reader, IndexInfo.Serializer idxInfoSerializer)
-        {
-            super(indexFilePosition, reader, idxInfoSerializer);
-            this.offsets = offsets;
-        }
-
-        IndexInfo fetchIndex(int index) throws IOException
-        {
-            retrievals++;
-
-            // seek to position of IndexInfo
-            indexReader.seek(indexInfoFilePosition + offsets[index]);
-
-            // deserialize IndexInfo
-            return idxInfoSerializer.deserialize(indexReader);
-        }
-    }
-
     /**
      * An entry in the row index for a row whose columns are indexed - used for both legacy and current formats.
      */
@@ -622,14 +450,9 @@ public class RowIndexEntry<T> implements IMeasurableMemory
             for (int i = 0; i < columnsIndexCount; i++)
                 this.columnsIndex[i] = idxInfoSerializer.deserialize(in);
 
-            int[] offsets = null;
-            if (version.storeRows())
-            {
-                offsets = new int[this.columnsIndex.length];
-                for (int i = 0; i < offsets.length; i++)
-                    offsets[i] = in.readInt();
-            }
-            this.offsets = offsets;
+            this.offsets = new int[this.columnsIndex.length];
+            for (int i = 0; i < offsets.length; i++)
+                offsets[i] = in.readInt();
 
             this.indexedPartSize = indexedPartSize;