Posted to commits@cassandra.apache.org by al...@apache.org on 2015/08/04 11:14:40 UTC

[1/5] cassandra git commit: Factor out TableParams from CFMetaData

Repository: cassandra
Updated Branches:
  refs/heads/cassandra-3.0 6932bd879 -> b31845c4a


http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/io/sstable/IndexSummaryManagerTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/IndexSummaryManagerTest.java b/test/unit/org/apache/cassandra/io/sstable/IndexSummaryManagerTest.java
index 5f11f51..5493edb 100644
--- a/test/unit/org/apache/cassandra/io/sstable/IndexSummaryManagerTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/IndexSummaryManagerTest.java
@@ -39,7 +39,6 @@ import org.slf4j.LoggerFactory;
 import org.apache.cassandra.OrderedJUnit4ClassRunner;
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
-import org.apache.cassandra.cache.CachingOptions;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.RowUpdateBuilder;
@@ -49,6 +48,7 @@ import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.metrics.RestorableMeter;
+import org.apache.cassandra.schema.CachingParams;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
@@ -87,12 +87,11 @@ public class IndexSummaryManagerTest
                                     SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARDLOWiINTERVAL)
                                                 .minIndexInterval(8)
                                                 .maxIndexInterval(256)
-                                                .caching(CachingOptions.NONE),
+                                                .caching(CachingParams.CACHE_NOTHING),
                                     SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARDRACE)
                                                 .minIndexInterval(8)
                                                 .maxIndexInterval(256)
-                                                .caching(CachingOptions.NONE)
-        );
+                                                .caching(CachingParams.CACHE_NOTHING));
     }
 
     @Before
@@ -102,8 +101,8 @@ public class IndexSummaryManagerTest
         String cfname = CF_STANDARDLOWiINTERVAL; // index interval of 8, no key caching
         Keyspace keyspace = Keyspace.open(ksname);
         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);
-        originalMinIndexInterval = cfs.metadata.getMinIndexInterval();
-        originalMaxIndexInterval = cfs.metadata.getMaxIndexInterval();
+        originalMinIndexInterval = cfs.metadata.params.minIndexInterval;
+        originalMaxIndexInterval = cfs.metadata.params.maxIndexInterval;
         originalCapacity = IndexSummaryManager.instance.getMemoryPoolCapacityInMB();
     }
 
@@ -215,15 +214,15 @@ public class IndexSummaryManagerTest
             sstable.overrideReadMeter(new RestorableMeter(100.0, 100.0));
 
         for (SSTableReader sstable : sstables)
-            assertEquals(cfs.metadata.getMinIndexInterval(), sstable.getEffectiveIndexInterval(), 0.001);
+            assertEquals(cfs.metadata.params.minIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
 
         // double the min_index_interval
         cfs.metadata.minIndexInterval(originalMinIndexInterval * 2);
         IndexSummaryManager.instance.redistributeSummaries();
         for (SSTableReader sstable : cfs.getLiveSSTables())
         {
-            assertEquals(cfs.metadata.getMinIndexInterval(), sstable.getEffectiveIndexInterval(), 0.001);
-            assertEquals(numRows / cfs.metadata.getMinIndexInterval(), sstable.getIndexSummarySize());
+            assertEquals(cfs.metadata.params.minIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
+            assertEquals(numRows / cfs.metadata.params.minIndexInterval, sstable.getIndexSummarySize());
         }
 
         // return min_index_interval to its original value
@@ -231,8 +230,8 @@ public class IndexSummaryManagerTest
         IndexSummaryManager.instance.redistributeSummaries();
         for (SSTableReader sstable : cfs.getLiveSSTables())
         {
-            assertEquals(cfs.metadata.getMinIndexInterval(), sstable.getEffectiveIndexInterval(), 0.001);
-            assertEquals(numRows / cfs.metadata.getMinIndexInterval(), sstable.getIndexSummarySize());
+            assertEquals(cfs.metadata.params.minIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
+            assertEquals(numRows / cfs.metadata.params.minIndexInterval, sstable.getIndexSummarySize());
         }
 
         // halve the min_index_interval, but constrain the available space to exactly what we have now; as a result,
@@ -281,7 +280,7 @@ public class IndexSummaryManagerTest
             redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.cfId, txn), 10);
         }
         sstable = cfs.getLiveSSTables().iterator().next();
-        assertEquals(cfs.metadata.getMinIndexInterval(), sstable.getEffectiveIndexInterval(), 0.001);
+        assertEquals(cfs.metadata.params.minIndexInterval, sstable.getEffectiveIndexInterval(), 0.001);
     }
 
     @Test
@@ -305,10 +304,10 @@ public class IndexSummaryManagerTest
         }
         sstables = new ArrayList<>(cfs.getLiveSSTables());
         for (SSTableReader sstable : sstables)
-            assertEquals(cfs.metadata.getMaxIndexInterval(), sstable.getEffectiveIndexInterval(), 0.01);
+            assertEquals(cfs.metadata.params.maxIndexInterval, sstable.getEffectiveIndexInterval(), 0.01);
 
         // halve the max_index_interval
-        cfs.metadata.maxIndexInterval(cfs.metadata.getMaxIndexInterval() / 2);
+        cfs.metadata.maxIndexInterval(cfs.metadata.params.maxIndexInterval / 2);
         try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN))
         {
             redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.cfId, txn), 1);
@@ -316,20 +315,20 @@ public class IndexSummaryManagerTest
         sstables = new ArrayList<>(cfs.getLiveSSTables());
         for (SSTableReader sstable : sstables)
         {
-            assertEquals(cfs.metadata.getMaxIndexInterval(), sstable.getEffectiveIndexInterval(), 0.01);
-            assertEquals(numRows / cfs.metadata.getMaxIndexInterval(), sstable.getIndexSummarySize());
+            assertEquals(cfs.metadata.params.maxIndexInterval, sstable.getEffectiveIndexInterval(), 0.01);
+            assertEquals(numRows / cfs.metadata.params.maxIndexInterval, sstable.getIndexSummarySize());
         }
 
         // return max_index_interval to its original value
-        cfs.metadata.maxIndexInterval(cfs.metadata.getMaxIndexInterval() * 2);
+        cfs.metadata.maxIndexInterval(cfs.metadata.params.maxIndexInterval * 2);
         try (LifecycleTransaction txn = cfs.getTracker().tryModify(sstables, OperationType.UNKNOWN))
         {
             redistributeSummaries(Collections.EMPTY_LIST, of(cfs.metadata.cfId, txn), 1);
         }
         for (SSTableReader sstable : cfs.getLiveSSTables())
         {
-            assertEquals(cfs.metadata.getMaxIndexInterval(), sstable.getEffectiveIndexInterval(), 0.01);
-            assertEquals(numRows / cfs.metadata.getMaxIndexInterval(), sstable.getIndexSummarySize());
+            assertEquals(cfs.metadata.params.maxIndexInterval, sstable.getEffectiveIndexInterval(), 0.01);
+            assertEquals(numRows / cfs.metadata.params.maxIndexInterval, sstable.getIndexSummarySize());
         }
     }
 
@@ -344,7 +343,7 @@ public class IndexSummaryManagerTest
         int numRows = 256;
         createSSTables(ksname, cfname, numSSTables, numRows);
 
-        int minSamplingLevel = (BASE_SAMPLING_LEVEL * cfs.metadata.getMinIndexInterval()) / cfs.metadata.getMaxIndexInterval();
+        int minSamplingLevel = (BASE_SAMPLING_LEVEL * cfs.metadata.params.minIndexInterval) / cfs.metadata.params.maxIndexInterval;
 
         List<SSTableReader> sstables = new ArrayList<>(cfs.getLiveSSTables());
         for (SSTableReader sstable : sstables)
@@ -520,7 +519,7 @@ public class IndexSummaryManagerTest
             {
                 sstable = sstable.cloneWithNewSummarySamplingLevel(cfs, samplingLevel);
                 assertEquals(samplingLevel, sstable.getIndexSummarySamplingLevel());
-                int expectedSize = (numRows * samplingLevel) / (sstable.metadata.getMinIndexInterval() * BASE_SAMPLING_LEVEL);
+                int expectedSize = (numRows * samplingLevel) / (sstable.metadata.params.minIndexInterval * BASE_SAMPLING_LEVEL);
                 assertEquals(expectedSize, sstable.getIndexSummarySize(), 1);
                 txn.update(sstable, true);
                 txn.checkpoint();
@@ -575,20 +574,20 @@ public class IndexSummaryManagerTest
             cfs.forceBlockingFlush();
         }
 
-        assertTrue(manager.getAverageIndexInterval() >= cfs.metadata.getMinIndexInterval());
+        assertTrue(manager.getAverageIndexInterval() >= cfs.metadata.params.minIndexInterval);
         Map<String, Integer> intervals = manager.getIndexIntervals();
         for (Map.Entry<String, Integer> entry : intervals.entrySet())
             if (entry.getKey().contains(CF_STANDARDLOWiINTERVAL))
-                assertEquals(cfs.metadata.getMinIndexInterval(), entry.getValue(), 0.001);
+                assertEquals(cfs.metadata.params.minIndexInterval, entry.getValue(), 0.001);
 
         manager.setMemoryPoolCapacityInMB(0);
         manager.redistributeSummaries();
-        assertTrue(manager.getAverageIndexInterval() > cfs.metadata.getMinIndexInterval());
+        assertTrue(manager.getAverageIndexInterval() > cfs.metadata.params.minIndexInterval);
         intervals = manager.getIndexIntervals();
         for (Map.Entry<String, Integer> entry : intervals.entrySet())
         {
             if (entry.getKey().contains(CF_STANDARDLOWiINTERVAL))
-                assertTrue(entry.getValue() >= cfs.metadata.getMinIndexInterval());
+                assertTrue(entry.getValue() >= cfs.metadata.params.minIndexInterval);
         }
     }
 }
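
A minimal sketch of the pattern the updated test now follows, assuming the SchemaLoader test helper and a loaded test schema as in the diff above: table options are configured through the CachingParams-backed builder methods on CFMetaData, and read back from the public params field instead of per-option getters. The wrapper class and method names below are illustrative only.

    import org.apache.cassandra.SchemaLoader;
    import org.apache.cassandra.config.CFMetaData;
    import org.apache.cassandra.schema.CachingParams;

    public class IndexIntervalParamsSketch
    {
        // was: .caching(CachingOptions.NONE)
        static CFMetaData lowIntervalTable(String keyspace, String table)
        {
            return SchemaLoader.standardCFMD(keyspace, table)
                               .minIndexInterval(8)
                               .maxIndexInterval(256)
                               .caching(CachingParams.CACHE_NOTHING);
        }

        // was: metadata.getMinIndexInterval() / metadata.getMaxIndexInterval()
        static int minInterval(CFMetaData metadata)
        {
            return metadata.params.minIndexInterval;
        }

        static int maxInterval(CFMetaData metadata)
        {
            return metadata.params.maxIndexInterval;
        }
    }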

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
index 2fe5ef2..6f92bd3 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
@@ -38,7 +38,6 @@ import org.junit.runner.RunWith;
 import org.apache.cassandra.OrderedJUnit4ClassRunner;
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
-import org.apache.cassandra.cache.CachingOptions;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.Operator;
 import org.apache.cassandra.db.BufferDecoratedKey;
@@ -58,6 +57,7 @@ import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.util.FileDataInput;
 import org.apache.cassandra.io.util.MmappedSegmentedFile;
 import org.apache.cassandra.io.util.SegmentedFile;
+import org.apache.cassandra.schema.CachingParams;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.service.CacheService;
 import org.apache.cassandra.utils.ByteBufferUtil;
@@ -95,7 +95,7 @@ public class SSTableReaderTest
                                     SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARDLOWINDEXINTERVAL)
                                                 .minIndexInterval(8)
                                                 .maxIndexInterval(256)
-                                                .caching(CachingOptions.NONE));
+                                                .caching(CachingParams.CACHE_NOTHING));
     }
 
     @Test
@@ -342,7 +342,7 @@ public class SSTableReaderTest
 
         DecoratedKey firstKey = null, lastKey = null;
         long timestamp = System.currentTimeMillis();
-        for (int i = 0; i < store.metadata.getMinIndexInterval(); i++)
+        for (int i = 0; i < store.metadata.params.minIndexInterval; i++)
         {
             DecoratedKey key = Util.dk(String.valueOf(i));
             if (firstKey == null)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/schema/DefsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/schema/DefsTest.java b/test/unit/org/apache/cassandra/schema/DefsTest.java
index df78cef..680c016 100644
--- a/test/unit/org/apache/cassandra/schema/DefsTest.java
+++ b/test/unit/org/apache/cassandra/schema/DefsTest.java
@@ -24,6 +24,7 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.function.Supplier;
 
+import com.google.common.collect.ImmutableMap;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -105,8 +106,8 @@ public class DefsTest
         cfm.comment("No comment")
            .readRepairChance(0.5)
            .gcGraceSeconds(100000)
-           .minCompactionThreshold(500)
-           .maxCompactionThreshold(500);
+           .compaction(CompactionParams.scts(ImmutableMap.of("min_threshold", "500",
+                                                             "max_threshold", "500")));
 
         // we'll be adding this one later. make sure it's not already there.
         assertNull(cfm.getColumnDefinition(ByteBuffer.wrap(new byte[]{ 5 })));
@@ -158,22 +159,6 @@ public class DefsTest
     }
 
     @Test
-    public void addNewCfWithNullComment() throws ConfigurationException
-    {
-        final String ks = KEYSPACE1;
-        final String cf = "BrandNewCfWithNull";
-        KeyspaceMetadata original = Schema.instance.getKSMetaData(ks);
-
-        CFMetaData newCf = addTestTable(original.name, cf, null);
-
-        assertFalse(Schema.instance.getKSMetaData(ks).tables.get(newCf.cfName).isPresent());
-        MigrationManager.announceNewColumnFamily(newCf);
-
-        assertTrue(Schema.instance.getKSMetaData(ks).tables.get(newCf.cfName).isPresent());
-        assertEquals(newCf, Schema.instance.getKSMetaData(ks).tables.get(newCf.cfName).get());
-    }
-
-    @Test
     public void addNewTable() throws ConfigurationException
     {
         final String ksName = KEYSPACE1;
@@ -402,7 +387,7 @@ public class DefsTest
         }
 
         Map<String, String> replicationMap = new HashMap<>();
-        replicationMap.put(KeyspaceParams.Replication.CLASS, OldNetworkTopologyStrategy.class.getName());
+        replicationMap.put(ReplicationParams.CLASS, OldNetworkTopologyStrategy.class.getName());
         replicationMap.put("replication_factor", "1");
 
         KeyspaceMetadata newKs = KeyspaceMetadata.create(cf.ksName, KeyspaceParams.create(true, replicationMap));
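
This hunk exercises two of the renamed entry points; a hedged sketch of both follows, with names taken from the diff (package locations follow the org.apache.cassandra.schema.* imports added elsewhere in this commit): compaction thresholds are now set through a single CompactionParams value rather than the min/maxCompactionThreshold setters, and the replication strategy key moved from KeyspaceParams.Replication.CLASS to ReplicationParams.CLASS. The wrapper class is illustrative only.

    import java.util.HashMap;
    import java.util.Map;

    import com.google.common.collect.ImmutableMap;

    import org.apache.cassandra.config.CFMetaData;
    import org.apache.cassandra.schema.CompactionParams;
    import org.apache.cassandra.schema.ReplicationParams;

    public class DefsParamsSketch
    {
        // was: cfm.minCompactionThreshold(500).maxCompactionThreshold(500)
        static CFMetaData withFixedThresholds(CFMetaData cfm)
        {
            return cfm.compaction(CompactionParams.scts(ImmutableMap.of("min_threshold", "500",
                                                                        "max_threshold", "500")));
        }

        // was: replicationMap.put(KeyspaceParams.Replication.CLASS, ...)
        static Map<String, String> simpleReplication(String strategyClassName)
        {
            Map<String, String> replication = new HashMap<>();
            replication.put(ReplicationParams.CLASS, strategyClassName);
            replication.put("replication_factor", "1");
            return replication;
        }
    }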

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/schema/LegacySchemaMigratorTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/schema/LegacySchemaMigratorTest.java b/test/unit/org/apache/cassandra/schema/LegacySchemaMigratorTest.java
index 82922e6..1b0fb12 100644
--- a/test/unit/org/apache/cassandra/schema/LegacySchemaMigratorTest.java
+++ b/test/unit/org/apache/cassandra/schema/LegacySchemaMigratorTest.java
@@ -26,13 +26,11 @@ import com.google.common.collect.ImmutableList;
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
-import org.apache.cassandra.cache.CachingOptions;
 import org.apache.cassandra.config.*;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.cql3.functions.*;
 import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.compaction.LeveledCompactionStrategy;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.thrift.ThriftConversion;
 
@@ -122,13 +120,14 @@ public class LegacySchemaMigratorTest
         // Make it easy to test compaction
         Map<String, String> compactionOptions = new HashMap<>();
         compactionOptions.put("tombstone_compaction_interval", "1");
+
         Map<String, String> leveledOptions = new HashMap<>();
         leveledOptions.put("sstable_size_in_mb", "1");
 
         keyspaces.add(KeyspaceMetadata.create(ks1,
                                               KeyspaceParams.simple(1),
                                               Tables.of(SchemaLoader.standardCFMD(ks1, "Standard1")
-                                                                    .compactionStrategyOptions(compactionOptions),
+                                                                    .compaction(CompactionParams.scts(compactionOptions)),
                                                         SchemaLoader.standardCFMD(ks1, "StandardGCGS0").gcGraceSeconds(0),
                                                         SchemaLoader.standardCFMD(ks1, "StandardLong1"),
                                                         SchemaLoader.superCFMD(ks1, "Super1", LongType.instance),
@@ -145,15 +144,13 @@ public class LegacySchemaMigratorTest
                                                         SchemaLoader.jdbcCFMD(ks1, "JdbcBytes", BytesType.instance),
                                                         SchemaLoader.jdbcCFMD(ks1, "JdbcAscii", AsciiType.instance),
                                                         SchemaLoader.standardCFMD(ks1, "StandardLeveled")
-                                                                    .compactionStrategyClass(LeveledCompactionStrategy.class)
-                                                                    .compactionStrategyOptions(leveledOptions),
+                                                                    .compaction(CompactionParams.lcs(leveledOptions)),
                                                         SchemaLoader.standardCFMD(ks1, "legacyleveled")
-                                                                    .compactionStrategyClass(LeveledCompactionStrategy.class)
-                                                                    .compactionStrategyOptions(leveledOptions),
+                                                                    .compaction(CompactionParams.lcs(leveledOptions)),
                                                         SchemaLoader.standardCFMD(ks1, "StandardLowIndexInterval")
                                                                     .minIndexInterval(8)
                                                                     .maxIndexInterval(256)
-                                                                    .caching(CachingOptions.NONE))));
+                                                                    .caching(CachingParams.CACHE_NOTHING))));
 
         // Keyspace 2
         keyspaces.add(KeyspaceMetadata.create(ks2,
@@ -184,6 +181,7 @@ public class LegacySchemaMigratorTest
         keyspaces.add(KeyspaceMetadata.create(ks5,
                                               KeyspaceParams.simple(2),
                                               Tables.of(SchemaLoader.standardCFMD(ks5, "Standard1"))));
+
         // Keyspace 6
         keyspaces.add(KeyspaceMetadata.create(ks6,
                                               KeyspaceParams.simple(1),
@@ -193,13 +191,11 @@ public class LegacySchemaMigratorTest
         keyspaces.add(KeyspaceMetadata.create(ks_rcs,
                                               KeyspaceParams.simple(1),
                                               Tables.of(SchemaLoader.standardCFMD(ks_rcs, "CFWithoutCache")
-                                                                    .caching(CachingOptions.NONE),
+                                                                    .caching(CachingParams.CACHE_NOTHING),
                                                         SchemaLoader.standardCFMD(ks_rcs, "CachedCF")
-                                                                    .caching(CachingOptions.ALL),
+                                                                    .caching(CachingParams.CACHE_EVERYTHING),
                                                         SchemaLoader.standardCFMD(ks_rcs, "CachedIntCF")
-                                                                    .caching(new CachingOptions(new CachingOptions.KeyCache(CachingOptions.KeyCache.Type.ALL),
-                                                                                                new CachingOptions.RowCache(CachingOptions.RowCache.Type.HEAD, 100))))));
-
+                                                                    .caching(new CachingParams(true, 100)))));
 
         keyspaces.add(KeyspaceMetadata.create(ks_nocommit,
                                               KeyspaceParams.simpleTransient(1),
@@ -423,23 +419,23 @@ public class LegacySchemaMigratorTest
             adder.add("comparator", LegacyLayout.makeLegacyComparator(table).toString());
         }
 
-        adder.add("bloom_filter_fp_chance", table.getBloomFilterFpChance())
-             .add("caching", table.getCaching().toString())
-             .add("comment", table.getComment())
-             .add("compaction_strategy_class", table.compactionStrategyClass.getName())
-             .add("compaction_strategy_options", json(table.compactionStrategyOptions))
-             .add("compression_parameters", json(ThriftConversion.compressionParametersToThrift(table.compressionParameters)))
-             .add("default_time_to_live", table.getDefaultTimeToLive())
-             .add("gc_grace_seconds", table.getGcGraceSeconds())
+        adder.add("bloom_filter_fp_chance", table.params.bloomFilterFpChance)
+             .add("caching", cachingToString(table.params.caching))
+             .add("comment", table.params.comment)
+             .add("compaction_strategy_class", table.params.compaction.klass().getName())
+             .add("compaction_strategy_options", json(table.params.compaction.options()))
+             .add("compression_parameters", json(ThriftConversion.compressionParametersToThrift(table.params.compression)))
+             .add("default_time_to_live", table.params.defaultTimeToLive)
+             .add("gc_grace_seconds", table.params.gcGraceSeconds)
              .add("key_validator", table.getKeyValidator().toString())
-             .add("local_read_repair_chance", table.getDcLocalReadRepairChance())
-             .add("max_compaction_threshold", table.getMaxCompactionThreshold())
-             .add("max_index_interval", table.getMaxIndexInterval())
-             .add("memtable_flush_period_in_ms", table.getMemtableFlushPeriod())
-             .add("min_compaction_threshold", table.getMinCompactionThreshold())
-             .add("min_index_interval", table.getMinIndexInterval())
-             .add("read_repair_chance", table.getReadRepairChance())
-             .add("speculative_retry", table.getSpeculativeRetry().toString());
+             .add("local_read_repair_chance", table.params.dcLocalReadRepairChance)
+             .add("max_compaction_threshold", table.params.compaction.maxCompactionThreshold())
+             .add("max_index_interval", table.params.maxIndexInterval)
+             .add("memtable_flush_period_in_ms", table.params.memtableFlushPeriodInMs)
+             .add("min_compaction_threshold", table.params.compaction.minCompactionThreshold())
+             .add("min_index_interval", table.params.minIndexInterval)
+             .add("read_repair_chance", table.params.readRepairChance)
+             .add("speculative_retry", table.params.speculativeRetry.toString());
 
         for (Map.Entry<ByteBuffer, CFMetaData.DroppedColumn> entry : table.getDroppedColumns().entrySet())
         {
@@ -464,6 +460,13 @@ public class LegacySchemaMigratorTest
         adder.build();
     }
 
+    private static String cachingToString(CachingParams caching)
+    {
+        return format("{\"keys\":\"%s\", \"rows_per_partition\":\"%s\"}",
+                      caching.keysAsString(),
+                      caching.rowsPerPartitionAsString());
+    }
+
     private static void addColumnToSchemaMutation(CFMetaData table, ColumnDefinition column, long timestamp, Mutation mutation)
     {
         // We need to special case pk-only dense tables. See CASSANDRA-9874.
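
For reference, the three caching configurations used by the migrated keyspaces above map onto the new CachingParams type as follows. The constants and the (cacheKeys, rowsPerPartition) constructor are exactly the ones appearing in the diff; the class and field names around them are illustrative only.

    import org.apache.cassandra.schema.CachingParams;

    public class CachingParamsSketch
    {
        static final CachingParams NO_CACHING   = CachingParams.CACHE_NOTHING;     // was CachingOptions.NONE
        static final CachingParams FULL_CACHING = CachingParams.CACHE_EVERYTHING;  // was CachingOptions.ALL
        // was new CachingOptions(KeyCache ALL, RowCache HEAD 100): cache keys plus the first 100 rows per partition
        static final CachingParams KEYS_AND_100_ROWS = new CachingParams(true, 100);
    }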

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/schema/SchemaKeyspaceTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/schema/SchemaKeyspaceTest.java b/test/unit/org/apache/cassandra/schema/SchemaKeyspaceTest.java
index 0545cc4..11fe3f1 100644
--- a/test/unit/org/apache/cassandra/schema/SchemaKeyspaceTest.java
+++ b/test/unit/org/apache/cassandra/schema/SchemaKeyspaceTest.java
@@ -31,10 +31,6 @@ import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.db.marshal.AsciiType;
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.io.compress.*;
-import org.apache.cassandra.schema.KeyspaceMetadata;
-import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.schema.SchemaKeyspace;
 import org.apache.cassandra.thrift.CfDef;
 import org.apache.cassandra.thrift.ColumnDef;
 import org.apache.cassandra.thrift.IndexType;
@@ -126,7 +122,7 @@ public class SchemaKeyspaceTest
 
                 // Testing with compression to catch #3558
                 CFMetaData withCompression = cfm.copy();
-                withCompression.compressionParameters(CompressionParameters.snappy(32768));
+                withCompression.compression(CompressionParams.snappy(32768));
                 checkInverses(withCompression);
             }
         }
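
A small sketch of the compression change tested above, assuming the CFMetaData.compression(...) mutator used in this commit: the snappy(chunkLength) factory keeps its shape, it simply moved from io.compress.CompressionParameters to schema.CompressionParams.

    import org.apache.cassandra.config.CFMetaData;
    import org.apache.cassandra.schema.CompressionParams;

    public class CompressionParamsSketch
    {
        // was: cfm.compressionParameters(CompressionParameters.snappy(32768))
        static void useSnappy(CFMetaData cfm)
        {
            cfm.compression(CompressionParams.snappy(32768));
        }
    }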

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/service/StorageServiceServerTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/service/StorageServiceServerTest.java b/test/unit/org/apache/cassandra/service/StorageServiceServerTest.java
index 6f19aff..392b6f4 100644
--- a/test/unit/org/apache/cassandra/service/StorageServiceServerTest.java
+++ b/test/unit/org/apache/cassandra/service/StorageServiceServerTest.java
@@ -49,6 +49,7 @@ import org.apache.cassandra.locator.IEndpointSnitch;
 import org.apache.cassandra.locator.PropertyFileSnitch;
 import org.apache.cassandra.locator.TokenMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.schema.ReplicationParams;
 import org.apache.cassandra.schema.SchemaKeyspace;
 import org.apache.cassandra.utils.FBUtilities;
 
@@ -192,7 +193,7 @@ public class StorageServiceServerTest
         Map<String, String> configOptions = new HashMap<>();
         configOptions.put("DC1", "1");
         configOptions.put("DC2", "1");
-        configOptions.put(KeyspaceParams.Replication.CLASS, "NetworkTopologyStrategy");
+        configOptions.put(ReplicationParams.CLASS, "NetworkTopologyStrategy");
 
         Keyspace.clear("Keyspace1");
         KeyspaceMetadata meta = KeyspaceMetadata.create("Keyspace1", KeyspaceParams.create(false, configOptions));
@@ -235,7 +236,7 @@ public class StorageServiceServerTest
         Map<String, String> configOptions = new HashMap<>();
         configOptions.put("DC1", "1");
         configOptions.put("DC2", "1");
-        configOptions.put(KeyspaceParams.Replication.CLASS, "NetworkTopologyStrategy");
+        configOptions.put(ReplicationParams.CLASS, "NetworkTopologyStrategy");
 
         Keyspace.clear("Keyspace1");
         KeyspaceMetadata meta = KeyspaceMetadata.create("Keyspace1", KeyspaceParams.create(false, configOptions));
@@ -272,7 +273,7 @@ public class StorageServiceServerTest
 
         Map<String, String> configOptions = new HashMap<>();
         configOptions.put("DC2", "2");
-        configOptions.put(KeyspaceParams.Replication.CLASS, "NetworkTopologyStrategy");
+        configOptions.put(ReplicationParams.CLASS, "NetworkTopologyStrategy");
 
         Keyspace.clear("Keyspace1");
         KeyspaceMetadata meta = KeyspaceMetadata.create("Keyspace1", KeyspaceParams.create(false, configOptions));
@@ -311,7 +312,7 @@ public class StorageServiceServerTest
 
         Map<String, String> configOptions = new HashMap<>();
         configOptions.put("DC2", "2");
-        configOptions.put(KeyspaceParams.Replication.CLASS, "NetworkTopologyStrategy");
+        configOptions.put(ReplicationParams.CLASS, "NetworkTopologyStrategy");
 
         Keyspace.clear("Keyspace1");
         KeyspaceMetadata meta = KeyspaceMetadata.create("Keyspace1", KeyspaceParams.create(false, configOptions));
@@ -363,7 +364,7 @@ public class StorageServiceServerTest
 
         Map<String, String> configOptions = new HashMap<>();
         configOptions.put("DC2", "2");
-        configOptions.put(KeyspaceParams.Replication.CLASS, "NetworkTopologyStrategy");
+        configOptions.put(ReplicationParams.CLASS, "NetworkTopologyStrategy");
 
         Keyspace.clear("Keyspace1");
         KeyspaceMetadata meta = KeyspaceMetadata.create("Keyspace1", KeyspaceParams.create(false, configOptions));
@@ -430,7 +431,7 @@ public class StorageServiceServerTest
         Map<String, String> configOptions = new HashMap<>();
         configOptions.put("DC1", "1");
         configOptions.put("DC2", "2");
-        configOptions.put(KeyspaceParams.Replication.CLASS, "NetworkTopologyStrategy");
+        configOptions.put(ReplicationParams.CLASS, "NetworkTopologyStrategy");
 
         Keyspace.clear("Keyspace1");
         KeyspaceMetadata meta = KeyspaceMetadata.create("Keyspace1", KeyspaceParams.create(false, configOptions));
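
The pattern repeated throughout this test file, sketched once: only the strategy key changed, while KeyspaceParams.create(durableWrites, options) and KeyspaceMetadata.create(...) are untouched. Keyspace and datacenter names below are placeholders.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.cassandra.schema.KeyspaceMetadata;
    import org.apache.cassandra.schema.KeyspaceParams;
    import org.apache.cassandra.schema.ReplicationParams;

    public class NtsKeyspaceSketch
    {
        static KeyspaceMetadata twoDcKeyspace(String name)
        {
            Map<String, String> options = new HashMap<>();
            options.put("DC1", "1");
            options.put("DC2", "1");
            // was: KeyspaceParams.Replication.CLASS
            options.put(ReplicationParams.CLASS, "NetworkTopologyStrategy");
            return KeyspaceMetadata.create(name, KeyspaceParams.create(false, options));
        }
    }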

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/streaming/compression/CompressedInputStreamTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/streaming/compression/CompressedInputStreamTest.java b/test/unit/org/apache/cassandra/streaming/compression/CompressedInputStreamTest.java
index 132abaa..37aea91 100644
--- a/test/unit/org/apache/cassandra/streaming/compression/CompressedInputStreamTest.java
+++ b/test/unit/org/apache/cassandra/streaming/compression/CompressedInputStreamTest.java
@@ -26,8 +26,7 @@ import org.apache.cassandra.db.ClusteringComparator;
 import org.apache.cassandra.db.marshal.BytesType;
 import org.apache.cassandra.io.compress.CompressedSequentialWriter;
 import org.apache.cassandra.io.compress.CompressionMetadata;
-import org.apache.cassandra.io.compress.CompressionParameters;
-import org.apache.cassandra.io.compress.SnappyCompressor;
+import org.apache.cassandra.schema.CompressionParams;
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
@@ -66,7 +65,7 @@ public class CompressedInputStreamTest
         File tmp = new File(File.createTempFile("cassandra", "unittest").getParent(), "ks-cf-ib-1-Data.db");
         Descriptor desc = Descriptor.fromFilename(tmp.getAbsolutePath());
         MetadataCollector collector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
-        CompressionParameters param = CompressionParameters.snappy(32);
+        CompressionParams param = CompressionParams.snappy(32);
         CompressedSequentialWriter writer = new CompressedSequentialWriter(tmp, desc.filenameFor(Component.COMPRESSION_INFO), param, collector);
         Map<Long, Long> index = new HashMap<Long, Long>();
         for (long l = 0L; l < 1000; l++)


[4/5] cassandra git commit: Factor out TableParams from CFMetaData

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
index c4377d6..d77cf1f 100644
--- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
+++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
@@ -34,19 +34,13 @@ import com.google.common.base.*;
 import com.google.common.base.Throwables;
 import com.google.common.collect.*;
 import com.google.common.util.concurrent.*;
-
-import org.apache.cassandra.db.lifecycle.*;
-import org.apache.cassandra.io.FSWriteError;
-import org.apache.cassandra.metrics.TableMetrics;
-import org.json.simple.*;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.clearspring.analytics.stream.Counter;
 import org.apache.cassandra.cache.*;
 import org.apache.cassandra.concurrent.*;
 import org.apache.cassandra.config.*;
-import org.apache.cassandra.config.CFMetaData.SpeculativeRetry;
-import org.apache.cassandra.db.rows.*;
 import org.apache.cassandra.db.commitlog.CommitLog;
 import org.apache.cassandra.db.commitlog.ReplayPosition;
 import org.apache.cassandra.db.compaction.*;
@@ -54,24 +48,28 @@ import org.apache.cassandra.db.filter.*;
 import org.apache.cassandra.db.index.SecondaryIndex;
 import org.apache.cassandra.db.index.SecondaryIndexManager;
 import org.apache.cassandra.db.view.MaterializedViewManager;
+import org.apache.cassandra.db.lifecycle.*;
 import org.apache.cassandra.db.partitions.*;
+import org.apache.cassandra.db.rows.*;
 import org.apache.cassandra.dht.*;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.io.compress.CompressionParameters;
-import org.apache.cassandra.io.sstable.Descriptor;
+import org.apache.cassandra.io.FSWriteError;
 import org.apache.cassandra.io.sstable.*;
+import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.sstable.format.*;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.metrics.TableMetrics.Sampler;
+import org.apache.cassandra.metrics.TableMetrics;
+import org.apache.cassandra.schema.*;
 import org.apache.cassandra.service.CacheService;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.*;
-import org.apache.cassandra.utils.concurrent.*;
 import org.apache.cassandra.utils.TopKSampler.SamplerResult;
+import org.apache.cassandra.utils.concurrent.*;
 import org.apache.cassandra.utils.memory.MemtableAllocator;
+import org.json.simple.*;
 
-import com.clearspring.analytics.stream.Counter;
 
 import static org.apache.cassandra.utils.Throwables.maybeFail;
 
@@ -185,10 +183,10 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
         // only update these runtime-modifiable settings if they have not been modified.
         if (!minCompactionThreshold.isModified())
             for (ColumnFamilyStore cfs : concatWithIndexes())
-                cfs.minCompactionThreshold = new DefaultInteger(metadata.getMinCompactionThreshold());
+                cfs.minCompactionThreshold = new DefaultInteger(metadata.params.compaction.minCompactionThreshold());
         if (!maxCompactionThreshold.isModified())
             for (ColumnFamilyStore cfs : concatWithIndexes())
-                cfs.maxCompactionThreshold = new DefaultInteger(metadata.getMaxCompactionThreshold());
+                cfs.maxCompactionThreshold = new DefaultInteger(metadata.params.compaction.maxCompactionThreshold());
 
         compactionStrategyManager.maybeReload(metadata);
 
@@ -205,7 +203,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
 
     void scheduleFlush()
     {
-        int period = metadata.getMemtableFlushPeriod();
+        int period = metadata.params.memtableFlushPeriodInMs;
         if (period > 0)
         {
             logger.debug("scheduling flush in {} ms", period);
@@ -252,33 +250,25 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
 
     public void setCompactionStrategyClass(String compactionStrategyClass)
     {
-        try
-        {
-            metadata.compactionStrategyClass = CFMetaData.createCompactionStrategy(compactionStrategyClass);
-            compactionStrategyManager.maybeReload(metadata);
-        }
-        catch (ConfigurationException e)
-        {
-            throw new IllegalArgumentException(e.getMessage());
-        }
+        throw new UnsupportedOperationException("ColumnFamilyStore.setCompactionStrategyClass() method is no longer supported");
     }
 
     public String getCompactionStrategyClass()
     {
-        return metadata.compactionStrategyClass.getName();
+        return metadata.params.compaction.klass().getName();
     }
 
     public Map<String,String> getCompressionParameters()
     {
-        return metadata.compressionParameters().asMap();
+        return metadata.params.compression.asMap();
     }
 
     public void setCompressionParameters(Map<String,String> opts)
     {
         try
         {
-            metadata.compressionParameters = CompressionParameters.fromMap(opts);
-            metadata.compressionParameters.validate();
+            metadata.compression(CompressionParams.fromMap(opts));
+            metadata.params.compression.validate();
         }
         catch (ConfigurationException e)
         {
@@ -326,8 +316,8 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
         this.keyspace = keyspace;
         name = columnFamilyName;
         this.metadata = metadata;
-        this.minCompactionThreshold = new DefaultInteger(metadata.getMinCompactionThreshold());
-        this.maxCompactionThreshold = new DefaultInteger(metadata.getMaxCompactionThreshold());
+        this.minCompactionThreshold = new DefaultInteger(metadata.params.compaction.minCompactionThreshold());
+        this.maxCompactionThreshold = new DefaultInteger(metadata.params.compaction.maxCompactionThreshold());
         this.directories = directories;
         this.indexManager = new SecondaryIndexManager(this);
         this.materializedViewManager = new MaterializedViewManager(this);
@@ -335,7 +325,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
         fileIndexGenerator.set(generation);
         sampleLatencyNanos = DatabaseDescriptor.getReadRpcTimeout() / 2;
 
-        CachingOptions caching = metadata.getCaching();
+        CachingParams caching = metadata.params.caching;
 
         logger.info("Initializing {}.{}", keyspace.getName(), name);
 
@@ -349,7 +339,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
             data.addInitialSSTables(sstables);
         }
 
-        if (caching.keyCache.isEnabled())
+        if (caching.cacheKeys())
             CacheService.instance.keyCache.loadSaved(this);
 
         // compaction strategy should be created after the CFS has been prepared
@@ -390,21 +380,20 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
             {
                 throw new RuntimeException(e);
             }
-            logger.debug("retryPolicy for {} is {}", name, this.metadata.getSpeculativeRetry());
+            logger.debug("retryPolicy for {} is {}", name, this.metadata.params.speculativeRetry);
             latencyCalculator = ScheduledExecutors.optionalTasks.scheduleWithFixedDelay(new Runnable()
             {
                 public void run()
                 {
-                    SpeculativeRetry retryPolicy = ColumnFamilyStore.this.metadata.getSpeculativeRetry();
-                    switch (retryPolicy.type)
+                    SpeculativeRetryParam retryPolicy = ColumnFamilyStore.this.metadata.params.speculativeRetry;
+                    switch (retryPolicy.kind())
                     {
                         case PERCENTILE:
                             // get percentile in nanos
-                            sampleLatencyNanos = (long) (metric.coordinatorReadLatency.getSnapshot().getValue(retryPolicy.value) * 1000d);
+                            sampleLatencyNanos = (long) (metric.coordinatorReadLatency.getSnapshot().getValue(retryPolicy.threshold()) * 1000d);
                             break;
                         case CUSTOM:
-                            // convert to nanos, since configuration is in millisecond
-                            sampleLatencyNanos = (long) (retryPolicy.value * 1000d * 1000d);
+                            sampleLatencyNanos = (long) retryPolicy.threshold();
                             break;
                         default:
                             sampleLatencyNanos = Long.MAX_VALUE;
@@ -1386,7 +1375,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
         // what we're caching. Wen doing that, we should be careful about expiring cells: we should count
         // something expired that wasn't when the partition was cached, or we could decide that the whole
         // partition is cached when it's not. This is why we use CachedPartition#cachedLiveRows.
-        if (cached.cachedLiveRows() < metadata.getCaching().rowCache.rowsToCache)
+        if (cached.cachedLiveRows() < metadata.params.caching.rowsPerPartitionToCache())
             return true;
 
         // If the whole partition isn't cached, then we must guarantee that the filter cannot select data that
@@ -1398,7 +1387,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
 
     public int gcBefore(int nowInSec)
     {
-        return nowInSec - metadata.getGcGraceSeconds();
+        return nowInSec - metadata.params.gcGraceSeconds;
     }
 
     @SuppressWarnings("resource")
@@ -2153,7 +2142,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
 
     public boolean isRowCacheEnabled()
     {
-        return metadata.getCaching().rowCache.isEnabled() && CacheService.instance.rowCache.getCapacity() > 0;
+        return metadata.params.caching.cacheRows() && CacheService.instance.rowCache.getCapacity() > 0;
     }
 
     /**
@@ -2193,7 +2182,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
 
         for (SSTableReader sstable : getSSTables(SSTableSet.LIVE))
         {
-            allDroppable += sstable.getDroppableTombstonesBefore(localTime - sstable.metadata.getGcGraceSeconds());
+            allDroppable += sstable.getDroppableTombstonesBefore(localTime - sstable.metadata.params.gcGraceSeconds);
             allColumns += sstable.getEstimatedColumnCount().mean() * sstable.getEstimatedColumnCount().count();
         }
         return allColumns > 0 ? allDroppable / allColumns : 0;
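
The bulk of this file's changes follow one rule: runtime reads of table options go through the public CFMetaData.params (the new TableParams) instead of per-option getters. A sketch of the accessors used in the hunks above; the field and method names on params are the ones from the diff, the wrapper class is illustrative only.

    import org.apache.cassandra.config.CFMetaData;

    public class TableParamsReadsSketch
    {
        // was: metadata.getGcGraceSeconds()
        static int gcBefore(CFMetaData metadata, int nowInSec)
        {
            return nowInSec - metadata.params.gcGraceSeconds;
        }

        // was: metadata.getMemtableFlushPeriod()
        static int flushPeriodMs(CFMetaData metadata)
        {
            return metadata.params.memtableFlushPeriodInMs;
        }

        // was: metadata.getMinCompactionThreshold()
        static int minCompactionThreshold(CFMetaData metadata)
        {
            return metadata.params.compaction.minCompactionThreshold();
        }

        // was: metadata.getCaching().rowCache.isEnabled()
        static boolean cachesRows(CFMetaData metadata)
        {
            return metadata.params.caching.cacheRows();
        }
    }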

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/db/HintedHandOffManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/HintedHandOffManager.java b/src/java/org/apache/cassandra/db/HintedHandOffManager.java
index 73189a6..17832d7 100644
--- a/src/java/org/apache/cassandra/db/HintedHandOffManager.java
+++ b/src/java/org/apache/cassandra/db/HintedHandOffManager.java
@@ -147,7 +147,7 @@ public class HintedHandOffManager implements HintedHandOffManagerMBean
     {
         int ttl = maxHintTTL;
         for (PartitionUpdate upd : mutation.getPartitionUpdates())
-            ttl = Math.min(ttl, upd.metadata().getGcGraceSeconds());
+            ttl = Math.min(ttl, upd.metadata().params.gcGraceSeconds);
         return ttl;
     }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/db/LivenessInfo.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/LivenessInfo.java b/src/java/org/apache/cassandra/db/LivenessInfo.java
index 8f7b1c2..3c87030 100644
--- a/src/java/org/apache/cassandra/db/LivenessInfo.java
+++ b/src/java/org/apache/cassandra/db/LivenessInfo.java
@@ -48,7 +48,7 @@ public class LivenessInfo
 
     public static LivenessInfo create(CFMetaData metadata, long timestamp, int nowInSec)
     {
-        int defaultTTL = metadata.getDefaultTimeToLive();
+        int defaultTTL = metadata.params.defaultTimeToLive;
         if (defaultTTL != NO_TTL)
             return expiring(timestamp, defaultTTL, nowInSec);
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/db/Memtable.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Memtable.java b/src/java/org/apache/cassandra/db/Memtable.java
index 5ec9fe5..a950e17 100644
--- a/src/java/org/apache/cassandra/db/Memtable.java
+++ b/src/java/org/apache/cassandra/db/Memtable.java
@@ -199,7 +199,7 @@ public class Memtable implements Comparable<Memtable>
      */
     public boolean isExpired()
     {
-        int period = cfs.metadata.getMemtableFlushPeriod();
+        int period = cfs.metadata.params.memtableFlushPeriodInMs;
         return period > 0 && (System.nanoTime() - creationNano >= TimeUnit.MILLISECONDS.toNanos(period));
     }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/db/RowUpdateBuilder.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/RowUpdateBuilder.java b/src/java/org/apache/cassandra/db/RowUpdateBuilder.java
index e4f05b0..372ba04 100644
--- a/src/java/org/apache/cassandra/db/RowUpdateBuilder.java
+++ b/src/java/org/apache/cassandra/db/RowUpdateBuilder.java
@@ -123,7 +123,7 @@ public class RowUpdateBuilder
 
     public RowUpdateBuilder(CFMetaData metadata, int localDeletionTime, long timestamp, Object partitionKey)
     {
-        this(metadata, localDeletionTime, timestamp, metadata.getDefaultTimeToLive(), partitionKey);
+        this(metadata, localDeletionTime, timestamp, metadata.params.defaultTimeToLive, partitionKey);
     }
 
     public RowUpdateBuilder(CFMetaData metadata, long timestamp, int ttl, Object partitionKey)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java b/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java
index 6e9e2d5..bb184e8 100644
--- a/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java
+++ b/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java
@@ -292,7 +292,7 @@ public abstract class SinglePartitionReadCommand<F extends ClusteringIndexFilter
         cfs.metric.rowCacheMiss.inc();
         Tracing.trace("Row cache miss");
 
-        boolean cacheFullPartitions = metadata().getCaching().rowCache.cacheFullPartitions();
+        boolean cacheFullPartitions = metadata().params.caching.cacheAllRows();
 
         // To be able to cache what we read, what we read must at least covers what the cache holds, that
         // is the 'rowsToCache' first rows of the partition. We could read those 'rowsToCache' first rows
@@ -309,7 +309,7 @@ public abstract class SinglePartitionReadCommand<F extends ClusteringIndexFilter
 
             try
             {
-                int rowsToCache = cacheFullPartitions ? Integer.MAX_VALUE : metadata().getCaching().rowCache.rowsToCache;
+                int rowsToCache = metadata().params.caching.rowsPerPartitionToCache();
                 @SuppressWarnings("resource") // we close on exception or upon closing the result of this method
                 UnfilteredRowIterator iter = SinglePartitionReadCommand.fullPartitionRead(metadata(), nowInSec(), partitionKey()).queryMemtableAndDisk(cfs, readOp);
                 try

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/db/SystemKeyspace.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/SystemKeyspace.java b/src/java/org/apache/cassandra/db/SystemKeyspace.java
index d17eaf7..2d0ca24 100644
--- a/src/java/org/apache/cassandra/db/SystemKeyspace.java
+++ b/src/java/org/apache/cassandra/db/SystemKeyspace.java
@@ -31,6 +31,7 @@ import com.google.common.io.ByteStreams;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.QueryProcessor;
@@ -39,7 +40,6 @@ import org.apache.cassandra.cql3.functions.*;
 import org.apache.cassandra.db.partitions.*;
 import org.apache.cassandra.db.commitlog.ReplayPosition;
 import org.apache.cassandra.db.compaction.CompactionHistoryTabularData;
-import org.apache.cassandra.db.compaction.LeveledCompactionStrategy;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Range;
@@ -53,12 +53,8 @@ import org.apache.cassandra.io.util.NIODataInputStream;
 import org.apache.cassandra.locator.IEndpointSnitch;
 import org.apache.cassandra.metrics.RestorableMeter;
 import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.schema.Functions;
-import org.apache.cassandra.schema.KeyspaceMetadata;
-import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.schema.SchemaKeyspace;
+import org.apache.cassandra.schema.*;
 import org.apache.cassandra.schema.Tables;
-import org.apache.cassandra.schema.Types;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.service.paxos.Commit;
 import org.apache.cassandra.service.paxos.PaxosState;
@@ -66,6 +62,9 @@ import org.apache.cassandra.thrift.cassandraConstants;
 import org.apache.cassandra.transport.Server;
 import org.apache.cassandra.utils.*;
 
+import static java.util.Collections.emptyMap;
+import static java.util.Collections.singletonMap;
+
 import static org.apache.cassandra.cql3.QueryProcessor.executeInternal;
 import static org.apache.cassandra.cql3.QueryProcessor.executeOnceInternal;
 
@@ -121,7 +120,7 @@ public final class SystemKeyspace
                 + "mutation blob,"
                 + "PRIMARY KEY ((target_id), hint_id, message_version)) "
                 + "WITH COMPACT STORAGE")
-                .compactionStrategyOptions(Collections.singletonMap("enabled", "false"))
+                .compaction(CompactionParams.scts(singletonMap("enabled", "false")))
                 .gcGraceSeconds(0);
 
     public static final CFMetaData Batchlog =
@@ -133,7 +132,7 @@ public final class SystemKeyspace
                 + "version int,"
                 + "written_at timestamp,"
                 + "PRIMARY KEY ((id)))")
-                .compactionStrategyOptions(Collections.singletonMap("min_threshold", "2"))
+                .compaction(CompactionParams.scts(singletonMap("min_threshold", "2")))
                 .gcGraceSeconds(0);
 
     private static final CFMetaData Paxos =
@@ -150,7 +149,7 @@ public final class SystemKeyspace
                 + "proposal_ballot timeuuid,"
                 + "proposal_version int,"
                 + "PRIMARY KEY ((row_key), cf_id))")
-                .compactionStrategyClass(LeveledCompactionStrategy.class);
+                .compaction(CompactionParams.lcs(emptyMap()));
 
     private static final CFMetaData BuiltIndexes =
         compile(BUILT_INDEXES,
@@ -610,7 +609,7 @@ public final class SystemKeyspace
         {
             ReplayPosition.serializer.serialize(position, out);
             out.writeLong(truncatedAt);
-            return Collections.singletonMap(cfs.metadata.cfId, ByteBuffer.wrap(out.getData(), 0, out.getLength()));
+            return singletonMap(cfs.metadata.cfId, ByteBuffer.wrap(out.getData(), 0, out.getLength()));
         }
         catch (IOException e)
         {
@@ -1093,7 +1092,7 @@ public final class SystemKeyspace
     private static int paxosTtl(CFMetaData metadata)
     {
         // keep paxos state around for at least 3h
-        return Math.max(3 * 3600, metadata.getGcGraceSeconds());
+        return Math.max(3 * 3600, metadata.params.gcGraceSeconds);
     }
 
     public static void savePaxosCommit(Commit commit)
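
The system tables now carry their compaction configuration as a single CompactionParams value: scts(options) for size-tiered compaction and lcs(options) for leveled compaction, matching the hints/batchlog/paxos definitions above. A sketch assuming the fluent CFMetaData setters used in this commit; the compile(...) helper is private to SystemKeyspace, so a CFMetaData argument stands in for it here.

    import static java.util.Collections.emptyMap;
    import static java.util.Collections.singletonMap;

    import org.apache.cassandra.config.CFMetaData;
    import org.apache.cassandra.schema.CompactionParams;

    public class SystemTableCompactionSketch
    {
        // was: .compactionStrategyOptions(Collections.singletonMap("enabled", "false"))
        static CFMetaData hintsStyle(CFMetaData table)
        {
            return table.compaction(CompactionParams.scts(singletonMap("enabled", "false")))
                        .gcGraceSeconds(0);
        }

        // was: .compactionStrategyClass(LeveledCompactionStrategy.class)
        static CFMetaData paxosStyle(CFMetaData table)
        {
            return table.compaction(CompactionParams.lcs(emptyMap()));
        }
    }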

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLog.java b/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
index c50beb6..e096011 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
@@ -38,7 +38,7 @@ import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.ParameterizedClass;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.io.FSWriteError;
-import org.apache.cassandra.io.compress.CompressionParameters;
+import org.apache.cassandra.schema.CompressionParams;
 import org.apache.cassandra.io.compress.ICompressor;
 import org.apache.cassandra.io.util.BufferedDataOutputStreamPlus;
 import org.apache.cassandra.io.util.DataOutputBufferFixed;
@@ -93,7 +93,7 @@ public class CommitLog implements CommitLogMBean
     {
         compressorClass = DatabaseDescriptor.getCommitLogCompression();
         this.location = location;
-        ICompressor compressor = compressorClass != null ? CompressionParameters.createCompressor(compressorClass) : null;
+        ICompressor compressor = compressorClass != null ? CompressionParams.createCompressor(compressorClass) : null;
         DatabaseDescriptor.createAllDirectories();
 
         this.compressor = compressor;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java
index ec270dd..e7d115d 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogArchiver.java
@@ -33,8 +33,7 @@ import java.util.concurrent.*;
 import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.io.compress.CompressionParameters;
-import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.schema.CompressionParams;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.WrappedRunnable;
 import org.slf4j.Logger;
@@ -212,7 +211,7 @@ public class CommitLogArchiver
                 if (descriptor.compression != null) {
                     try
                     {
-                        CompressionParameters.createCompressor(descriptor.compression);
+                        CompressionParams.createCompressor(descriptor.compression);
                     }
                     catch (ConfigurationException e)
                     {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java b/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java
index 1e12ed6..d232a63 100644
--- a/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java
+++ b/src/java/org/apache/cassandra/db/commitlog/CommitLogReplayer.java
@@ -47,7 +47,7 @@ import org.apache.cassandra.db.rows.SerializationHelper;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.db.lifecycle.SSTableSet;
 import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.io.compress.CompressionParameters;
+import org.apache.cassandra.schema.CompressionParams;
 import org.apache.cassandra.io.compress.ICompressor;
 import org.apache.cassandra.io.util.ByteBufferDataInput;
 import org.apache.cassandra.io.util.DataInputBuffer;
@@ -307,7 +307,7 @@ public class CommitLogReplayer
             {
                 try
                 {
-                    compressor = CompressionParameters.createCompressor(desc.compression);
+                    compressor = CompressionParams.createCompressor(desc.compression);
                 }
                 catch (ConfigurationException e)
                 {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
index fc0acff..92cc249 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionManager.java
@@ -808,7 +808,7 @@ public class CompactionManager implements CompactionManagerMBean
 
         long totalkeysWritten = 0;
 
-        long expectedBloomFilterSize = Math.max(cfs.metadata.getMinIndexInterval(),
+        long expectedBloomFilterSize = Math.max(cfs.metadata.params.minIndexInterval,
                                                SSTableReader.getApproximateKeyCount(txn.originals()));
         if (logger.isDebugEnabled())
             logger.debug("Expected bloom filter size : {}", expectedBloomFilterSize);
@@ -1201,7 +1201,7 @@ public class CompactionManager implements CompactionManagerMBean
              CompactionController controller = new CompactionController(cfs, sstableAsSet, getDefaultGcBefore(cfs, nowInSec));
              CompactionIterator ci = new CompactionIterator(OperationType.ANTICOMPACTION, scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID(), metrics))
         {
-            int expectedBloomFilterSize = Math.max(cfs.metadata.getMinIndexInterval(), (int)(SSTableReader.getApproximateKeyCount(sstableAsSet)));
+            int expectedBloomFilterSize = Math.max(cfs.metadata.params.minIndexInterval, (int)(SSTableReader.getApproximateKeyCount(sstableAsSet)));
 
             repairedSSTableWriter.switchWriter(CompactionManager.createWriterForAntiCompaction(cfs, destination, expectedBloomFilterSize, repairedAt, sstableAsSet, anticompactionGroup));
             unRepairedSSTableWriter.switchWriter(CompactionManager.createWriterForAntiCompaction(cfs, destination, expectedBloomFilterSize, ActiveRepairService.UNREPAIRED_SSTABLE, sstableAsSet, anticompactionGroup));
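
A minimal sketch of the sizing floor that both call sites apply; the helper name is invented, and only the params.minIndexInterval accessor and the Math.max() floor come from the patch:

    import org.apache.cassandra.config.CFMetaData;

    final class BloomFilterSizingSketch
    {
        // The estimated key count is floored at the table's min index interval,
        // now read from TableParams instead of a CFMetaData getter.
        static long expectedBloomFilterSize(CFMetaData metadata, long approximateKeyCount)
        {
            return Math.max(metadata.params.minIndexInterval, approximateKeyCount);
        }
    }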

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java b/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java
index e2ae6b6..d1b004d 100644
--- a/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionStrategyManager.java
@@ -18,16 +18,9 @@
 package org.apache.cassandra.db.compaction;
 
 
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import java.util.*;
 import java.util.concurrent.Callable;
 
-import com.google.common.collect.ImmutableMap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -36,17 +29,11 @@ import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Memtable;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.db.lifecycle.SSTableSet;
-import org.apache.cassandra.db.lifecycle.View;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.ISSTableScanner;
-import org.apache.cassandra.notifications.INotification;
-import org.apache.cassandra.notifications.INotificationConsumer;
-import org.apache.cassandra.notifications.SSTableAddedNotification;
-import org.apache.cassandra.notifications.SSTableDeletingNotification;
-import org.apache.cassandra.notifications.SSTableListChangedNotification;
-import org.apache.cassandra.notifications.SSTableRepairStatusChanged;
+import org.apache.cassandra.notifications.*;
 
 /**
  * Manages the compaction strategies.
@@ -56,14 +43,12 @@ import org.apache.cassandra.notifications.SSTableRepairStatusChanged;
  */
 public class CompactionStrategyManager implements INotificationConsumer
 {
-    protected static final String COMPACTION_ENABLED = "enabled";
     private static final Logger logger = LoggerFactory.getLogger(CompactionStrategyManager.class);
     private final ColumnFamilyStore cfs;
     private volatile AbstractCompactionStrategy repaired;
     private volatile AbstractCompactionStrategy unrepaired;
     private volatile boolean enabled = true;
     public boolean isActive = true;
-    private Map<String, String> options;
 
     public CompactionStrategyManager(ColumnFamilyStore cfs)
     {
@@ -71,9 +56,7 @@ public class CompactionStrategyManager implements INotificationConsumer
         logger.debug("{} subscribed to the data tracker.", this);
         this.cfs = cfs;
         reload(cfs.metadata);
-        String optionValue = cfs.metadata.compactionStrategyOptions.get(COMPACTION_ENABLED);
-        enabled = optionValue == null || Boolean.parseBoolean(optionValue);
-        options = ImmutableMap.copyOf(cfs.metadata.compactionStrategyOptions);
+        enabled = cfs.metadata.params.compaction.isEnabled();
     }
 
     /**
@@ -159,13 +142,12 @@ public class CompactionStrategyManager implements INotificationConsumer
         unrepaired.shutdown();
     }
 
-
     public synchronized void maybeReload(CFMetaData metadata)
     {
-        if (repaired != null && repaired.getClass().equals(metadata.compactionStrategyClass)
-                && unrepaired != null && unrepaired.getClass().equals(metadata.compactionStrategyClass)
-                && repaired.options.equals(metadata.compactionStrategyOptions) // todo: assumes all have the same options
-                && unrepaired.options.equals(metadata.compactionStrategyOptions))
+        if (repaired != null && repaired.getClass().equals(metadata.params.compaction.klass())
+                && unrepaired != null && unrepaired.getClass().equals(metadata.params.compaction.klass())
+                && repaired.options.equals(metadata.params.compaction.options()) // todo: assumes all have the same options
+                && unrepaired.options.equals(metadata.params.compaction.options()))
             return;
         reload(metadata);
     }
@@ -185,7 +167,6 @@ public class CompactionStrategyManager implements INotificationConsumer
             unrepaired.shutdown();
         repaired = metadata.createCompactionStrategyInstance(cfs);
         unrepaired = metadata.createCompactionStrategyInstance(cfs);
-        options = ImmutableMap.copyOf(metadata.compactionStrategyOptions);
         if (disabledWithJMX || !shouldBeEnabled())
             disable();
         else
@@ -445,8 +426,7 @@ public class CompactionStrategyManager implements INotificationConsumer
 
     public boolean shouldBeEnabled()
     {
-        String optionValue = options.get(COMPACTION_ENABLED);
-        return optionValue == null || Boolean.parseBoolean(optionValue);
+        return cfs.metadata.params.compaction.isEnabled();
     }
 
     public String getName()
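
For context, a minimal sketch of the new way the flag is obtained; the helper class below is invented for illustration, and only the cfs.metadata.params.compaction.isEnabled() accessor chain comes from the patch:

    import org.apache.cassandra.db.ColumnFamilyStore;

    final class CompactionEnabledSketch
    {
        // The "enabled" flag is no longer parsed out of a cached copy of the raw
        // compaction options map; it is read from the table's compaction params,
        // exactly as the patched constructor and shouldBeEnabled() now do.
        static boolean compactionEnabled(ColumnFamilyStore cfs)
        {
            return cfs.metadata.params.compaction.isEnabled();
        }
    }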

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/db/compaction/DateTieredCompactionStrategy.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/DateTieredCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/DateTieredCompactionStrategy.java
index 4f5e371..8fa3b8f 100644
--- a/src/java/org/apache/cassandra/db/compaction/DateTieredCompactionStrategy.java
+++ b/src/java/org/apache/cassandra/db/compaction/DateTieredCompactionStrategy.java
@@ -22,17 +22,15 @@ import java.util.*;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Predicate;
 import com.google.common.collect.*;
-
-import org.apache.cassandra.db.lifecycle.SSTableSet;
-import org.apache.cassandra.db.lifecycle.View;
-import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.cql3.statements.CFPropDefs;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
+import org.apache.cassandra.db.lifecycle.SSTableSet;
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.utils.Pair;
 
 import static com.google.common.collect.Iterables.filter;
@@ -422,8 +420,8 @@ public class DateTieredCompactionStrategy extends AbstractCompactionStrategy
         Map<String, String> uncheckedOptions = AbstractCompactionStrategy.validateOptions(options);
         uncheckedOptions = DateTieredCompactionStrategyOptions.validateOptions(options, uncheckedOptions);
 
-        uncheckedOptions.remove(CFPropDefs.KW_MINCOMPACTIONTHRESHOLD);
-        uncheckedOptions.remove(CFPropDefs.KW_MAXCOMPACTIONTHRESHOLD);
+        uncheckedOptions.remove(CompactionParams.Option.MIN_THRESHOLD.toString());
+        uncheckedOptions.remove(CompactionParams.Option.MAX_THRESHOLD.toString());
 
         return uncheckedOptions;
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/db/compaction/Scrubber.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/Scrubber.java b/src/java/org/apache/cassandra/db/compaction/Scrubber.java
index 57fe267..891fac8 100644
--- a/src/java/org/apache/cassandra/db/compaction/Scrubber.java
+++ b/src/java/org/apache/cassandra/db/compaction/Scrubber.java
@@ -124,7 +124,7 @@ public class Scrubber implements Closeable
         }
         this.checkData = checkData && !this.isIndex; //LocalByPartitionerType does not support validation
         this.expectedBloomFilterSize = Math.max(
-            cfs.metadata.getMinIndexInterval(),
+            cfs.metadata.params.minIndexInterval,
             hasIndexFile ? SSTableReader.getApproximateKeyCount(toScrub) : 0);
 
         // loop through each row, deserializing to check for damage.

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategy.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategy.java
index fa3918f..09d40c8 100644
--- a/src/java/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategy.java
+++ b/src/java/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategy.java
@@ -22,18 +22,16 @@ import java.util.Map.Entry;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Iterables;
-import com.google.common.collect.Sets;
-
-import org.apache.cassandra.db.compaction.writers.CompactionAwareWriter;
-import org.apache.cassandra.db.compaction.writers.SplittingSizeTieredCompactionWriter;
-import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.cql3.statements.CFPropDefs;
 import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.compaction.writers.CompactionAwareWriter;
+import org.apache.cassandra.db.compaction.writers.SplittingSizeTieredCompactionWriter;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.utils.Pair;
 
 import static com.google.common.collect.Iterables.filter;
@@ -304,8 +302,8 @@ public class SizeTieredCompactionStrategy extends AbstractCompactionStrategy
         Map<String, String> uncheckedOptions = AbstractCompactionStrategy.validateOptions(options);
         uncheckedOptions = SizeTieredCompactionStrategyOptions.validateOptions(options, uncheckedOptions);
 
-        uncheckedOptions.remove(CFPropDefs.KW_MINCOMPACTIONTHRESHOLD);
-        uncheckedOptions.remove(CFPropDefs.KW_MAXCOMPACTIONTHRESHOLD);
+        uncheckedOptions.remove(CompactionParams.Option.MIN_THRESHOLD.toString());
+        uncheckedOptions.remove(CompactionParams.Option.MAX_THRESHOLD.toString());
 
         return uncheckedOptions;
     }
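
Both the date-tiered and size-tiered strategies now strip the threshold keys through the CompactionParams.Option enum rather than the old CFPropDefs constants. A small sketch under that assumption (the helper class and the copy-then-remove shape are illustrative only):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.cassandra.schema.CompactionParams;

    final class ThresholdOptionSketch
    {
        // Removes the min/max compaction threshold keys before the remaining
        // options are reported as "unchecked", mirroring validateOptions() in the
        // size-tiered and date-tiered strategies.
        static Map<String, String> stripThresholds(Map<String, String> options)
        {
            Map<String, String> unchecked = new HashMap<>(options);
            unchecked.remove(CompactionParams.Option.MIN_THRESHOLD.toString());
            unchecked.remove(CompactionParams.Option.MAX_THRESHOLD.toString());
            return unchecked;
        }
    }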

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/db/compaction/Upgrader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/compaction/Upgrader.java b/src/java/org/apache/cassandra/db/compaction/Upgrader.java
index b8a102e..5a36210 100644
--- a/src/java/org/apache/cassandra/db/compaction/Upgrader.java
+++ b/src/java/org/apache/cassandra/db/compaction/Upgrader.java
@@ -61,7 +61,7 @@ public class Upgrader
         this.controller = new UpgradeController(cfs);
 
         this.strategyManager = cfs.getCompactionStrategyManager();
-        long estimatedTotalKeys = Math.max(cfs.metadata.getMinIndexInterval(), SSTableReader.getApproximateKeyCount(Arrays.asList(this.sstable)));
+        long estimatedTotalKeys = Math.max(cfs.metadata.params.minIndexInterval, SSTableReader.getApproximateKeyCount(Arrays.asList(this.sstable)));
         long estimatedSSTables = Math.max(1, SSTableReader.getTotalBytes(Arrays.asList(this.sstable)) / strategyManager.getMaxSSTableBytes());
         this.estimatedRows = (long) Math.ceil((double) estimatedTotalKeys / estimatedSSTables);
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/db/rows/BufferCell.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/rows/BufferCell.java b/src/java/org/apache/cassandra/db/rows/BufferCell.java
index 81c42d4..f9a3026 100644
--- a/src/java/org/apache/cassandra/db/rows/BufferCell.java
+++ b/src/java/org/apache/cassandra/db/rows/BufferCell.java
@@ -61,8 +61,8 @@ public class BufferCell extends AbstractCell
 
     public static BufferCell live(CFMetaData metadata, ColumnDefinition column, long timestamp, ByteBuffer value, CellPath path)
     {
-        if (metadata.getDefaultTimeToLive() != NO_TTL)
-            return expiring(column, timestamp, metadata.getDefaultTimeToLive(), FBUtilities.nowInSeconds(), value, path);
+        if (metadata.params.defaultTimeToLive != NO_TTL)
+            return expiring(column, timestamp, metadata.params.defaultTimeToLive, FBUtilities.nowInSeconds(), value, path);
 
         return new BufferCell(column, timestamp, NO_TTL, NO_DELETION_TIME, value, path);
     }
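
A minimal sketch of the new default-TTL lookup; the helper is invented, and treating 0 as the NO_TTL sentinel is an assumption carried over from the constant BufferCell compares against:

    import org.apache.cassandra.config.CFMetaData;

    final class DefaultTtlSketch
    {
        // BufferCell.live() now reads the table's default TTL from TableParams;
        // a non-zero value turns a plain live write into an expiring cell.
        static boolean writesExpireByDefault(CFMetaData metadata)
        {
            return metadata.params.defaultTimeToLive != 0; // assumption: NO_TTL == 0
        }
    }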

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/db/view/MaterializedView.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/view/MaterializedView.java b/src/java/org/apache/cassandra/db/view/MaterializedView.java
index f36abae..988bfc5 100644
--- a/src/java/org/apache/cassandra/db/view/MaterializedView.java
+++ b/src/java/org/apache/cassandra/db/view/MaterializedView.java
@@ -684,9 +684,6 @@ public class MaterializedView
             }
         }
 
-        CFMetaData cfm = viewBuilder.build();
-        properties.properties.applyToCFMetadata(cfm);
-
-        return cfm;
+        return viewBuilder.build().params(properties.properties.asNewTableParams());
     }
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/dht/OrderPreservingPartitioner.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/dht/OrderPreservingPartitioner.java b/src/java/org/apache/cassandra/dht/OrderPreservingPartitioner.java
index 45c2cfa..464ac3d 100644
--- a/src/java/org/apache/cassandra/dht/OrderPreservingPartitioner.java
+++ b/src/java/org/apache/cassandra/dht/OrderPreservingPartitioner.java
@@ -221,7 +221,7 @@ public class OrderPreservingPartitioner implements IPartitioner
                 for (Range<Token> r : sortedRanges)
                 {
                     // Looping over every KS:CF:Range, get the splits size and add it to the count
-                    allTokens.put(r.right, allTokens.get(r.right) + StorageService.instance.getSplits(ks, cfmd.cfName, r, cfmd.getMinIndexInterval()).size());
+                    allTokens.put(r.right, allTokens.get(r.right) + StorageService.instance.getSplits(ks, cfmd.cfName, r, cfmd.params.minIndexInterval).size());
                 }
             }
         }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/hadoop/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/hadoop/ConfigHelper.java b/src/java/org/apache/cassandra/hadoop/ConfigHelper.java
index b5c66df..a9dcc6c 100644
--- a/src/java/org/apache/cassandra/hadoop/ConfigHelper.java
+++ b/src/java/org/apache/cassandra/hadoop/ConfigHelper.java
@@ -1,4 +1,3 @@
-package org.apache.cassandra.hadoop;
 /*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
@@ -19,6 +18,7 @@ package org.apache.cassandra.hadoop;
  * under the License.
  *
  */
+package org.apache.cassandra.hadoop;
 
 import java.io.IOException;
 import java.util.*;
@@ -27,7 +27,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.dht.IPartitioner;
-import org.apache.cassandra.io.compress.CompressionParameters;
+import org.apache.cassandra.schema.CompressionParams;
 import org.apache.cassandra.thrift.*;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.Hex;
@@ -456,7 +456,7 @@ public class ConfigHelper
 
     public static String getOutputCompressionChunkLength(Configuration conf)
     {
-        return conf.get(OUTPUT_COMPRESSION_CHUNK_LENGTH, String.valueOf(CompressionParameters.DEFAULT_CHUNK_LENGTH));
+        return conf.get(OUTPUT_COMPRESSION_CHUNK_LENGTH, String.valueOf(CompressionParams.DEFAULT_CHUNK_LENGTH));
     }
 
     public static void setOutputCompressionClass(Configuration conf, String classname)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/io/compress/CompressedSequentialWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/compress/CompressedSequentialWriter.java b/src/java/org/apache/cassandra/io/compress/CompressedSequentialWriter.java
index 0717121..bc1e6f6 100644
--- a/src/java/org/apache/cassandra/io/compress/CompressedSequentialWriter.java
+++ b/src/java/org/apache/cassandra/io/compress/CompressedSequentialWriter.java
@@ -32,7 +32,7 @@ import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.io.util.DataIntegrityMetadata;
 import org.apache.cassandra.io.util.FileMark;
 import org.apache.cassandra.io.util.SequentialWriter;
-import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.schema.CompressionParams;
 
 public class CompressedSequentialWriter extends SequentialWriter
 {
@@ -60,7 +60,7 @@ public class CompressedSequentialWriter extends SequentialWriter
 
     public CompressedSequentialWriter(File file,
                                       String offsetsPath,
-                                      CompressionParameters parameters,
+                                      CompressionParams parameters,
                                       MetadataCollector sstableMetadataCollector)
     {
         super(file, parameters.chunkLength(), parameters.getSstableCompressor().preferredBufferType());

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
index ff9ae64..bd6da2c 100644
--- a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
+++ b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
@@ -52,6 +52,7 @@ import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.io.util.Memory;
 import org.apache.cassandra.io.util.SafeMemory;
+import org.apache.cassandra.schema.CompressionParams;
 import org.apache.cassandra.utils.Pair;
 import org.apache.cassandra.utils.concurrent.Transactional;
 import org.apache.cassandra.utils.concurrent.Ref;
@@ -69,7 +70,7 @@ public class CompressionMetadata
     private final Memory chunkOffsets;
     private final long chunkOffsetsSize;
     public final String indexFilePath;
-    public final CompressionParameters parameters;
+    public final CompressionParams parameters;
 
     /**
      * Create metadata about given compressed file including uncompressed data length, chunk size
@@ -107,11 +108,11 @@ public class CompressionMetadata
             int chunkLength = stream.readInt();
             try
             {
-                parameters = new CompressionParameters(compressorName, chunkLength, options);
+                parameters = new CompressionParams(compressorName, chunkLength, options);
             }
             catch (ConfigurationException e)
             {
-                throw new RuntimeException("Cannot create CompressionParameters for stored parameters", e);
+                throw new RuntimeException("Cannot create CompressionParams for stored parameters", e);
             }
 
             dataLength = stream.readLong();
@@ -130,7 +131,7 @@ public class CompressionMetadata
         this.chunkOffsetsSize = chunkOffsets.size();
     }
 
-    private CompressionMetadata(String filePath, CompressionParameters parameters, SafeMemory offsets, long offsetsSize, long dataLength, long compressedLength)
+    private CompressionMetadata(String filePath, CompressionParams parameters, SafeMemory offsets, long offsetsSize, long dataLength, long compressedLength)
     {
         this.indexFilePath = filePath;
         this.parameters = parameters;
@@ -275,7 +276,7 @@ public class CompressionMetadata
     public static class Writer extends Transactional.AbstractTransactional implements Transactional
     {
         // path to the file
-        private final CompressionParameters parameters;
+        private final CompressionParams parameters;
         private final String filePath;
         private int maxCount = 100;
         private SafeMemory offsets = new SafeMemory(maxCount * 8L);
@@ -284,13 +285,13 @@ public class CompressionMetadata
         // provided by user when setDescriptor
         private long dataLength, chunkCount;
 
-        private Writer(CompressionParameters parameters, String path)
+        private Writer(CompressionParams parameters, String path)
         {
             this.parameters = parameters;
             filePath = path;
         }
 
-        public static Writer open(CompressionParameters parameters, String path)
+        public static Writer open(CompressionParams parameters, String path)
         {
             return new Writer(parameters, path);
         }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/io/compress/CompressionParameters.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/compress/CompressionParameters.java b/src/java/org/apache/cassandra/io/compress/CompressionParameters.java
deleted file mode 100644
index c828e27..0000000
--- a/src/java/org/apache/cassandra/io/compress/CompressionParameters.java
+++ /dev/null
@@ -1,564 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.io.compress;
-
-import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.util.AbstractSet;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Sets;
-
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-
-import org.slf4j.LoggerFactory;
-import org.slf4j.Logger;
-import org.apache.cassandra.config.CFMetaData;
-import org.apache.cassandra.config.ParameterizedClass;
-import org.apache.cassandra.db.TypeSizes;
-import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.io.IVersionedSerializer;
-import org.apache.cassandra.io.util.DataInputPlus;
-import org.apache.cassandra.io.util.DataOutputPlus;
-
-public class CompressionParameters
-{
-    private final static Logger LOGGER = LoggerFactory.getLogger(CompressionParameters.class);
-
-    private volatile static boolean hasLoggedSsTableCompressionWarning;
-    private volatile static boolean hasLoggedChunkLengthWarning;
-
-    public final static int DEFAULT_CHUNK_LENGTH = 65536;
-    public final static double DEFAULT_CRC_CHECK_CHANCE = 1.0;
-    public final static IVersionedSerializer<CompressionParameters> serializer = new Serializer();
-
-    public static final String CLASS = "class";
-    public static final String CHUNK_LENGTH_IN_KB = "chunk_length_in_kb";
-    public static final String ENABLED = "enabled";
-    @Deprecated
-    public static final String SSTABLE_COMPRESSION = "sstable_compression";
-    @Deprecated
-    public static final String CHUNK_LENGTH_KB = "chunk_length_kb";
-    public static final String CRC_CHECK_CHANCE = "crc_check_chance";
-
-    public static final Set<String> GLOBAL_OPTIONS = ImmutableSet.of(CRC_CHECK_CHANCE);
-
-    private final ICompressor sstableCompressor;
-    private final Integer chunkLength;
-    private volatile double crcCheckChance;
-    private final ImmutableMap<String, String> otherOptions; // Unrecognized options, can be use by the compressor
-    private CFMetaData liveMetadata;
-
-    public static CompressionParameters fromMap(Map<? extends CharSequence, ? extends CharSequence> opts)
-    {
-        Map<String, String> options = copyOptions(opts);
-
-        String sstableCompressionClass;
-
-        if (!removeEnabled(options))
-        {
-            sstableCompressionClass = null;
-
-            if (!options.isEmpty())
-                throw new ConfigurationException("If the '" + ENABLED + "' option is set to false"
-                                                  + " no other options must be specified");
-        }
-        else
-        {
-            sstableCompressionClass= removeSstableCompressionClass(options);
-        }
-
-        Integer chunkLength = removeChunkLength(options);
-
-        CompressionParameters cp = new CompressionParameters(sstableCompressionClass, chunkLength, options);
-        cp.validate();
-
-        return cp;
-    }
-
-    public static CompressionParameters noCompression()
-    {
-        return new CompressionParameters((ICompressor) null, DEFAULT_CHUNK_LENGTH, Collections.emptyMap());
-    }
-
-    public static CompressionParameters snappy()
-    {
-        return snappy(null);
-    }
-
-    public static CompressionParameters snappy(Integer chunkLength)
-    {
-        return new CompressionParameters(SnappyCompressor.instance, chunkLength, Collections.emptyMap());
-    }
-
-    public static CompressionParameters deflate()
-    {
-        return deflate(null);
-    }
-
-    public static CompressionParameters deflate(Integer chunkLength)
-    {
-        return new CompressionParameters(DeflateCompressor.instance, chunkLength, Collections.emptyMap());
-    }
-
-    public static CompressionParameters lz4()
-    {
-        return lz4(null);
-    }
-
-    public static CompressionParameters lz4(Integer chunkLength)
-    {
-        return new CompressionParameters(LZ4Compressor.instance, chunkLength, Collections.emptyMap());
-    }
-
-    CompressionParameters(String sstableCompressorClass, Integer chunkLength, Map<String, String> otherOptions) throws ConfigurationException
-    {
-        this(createCompressor(parseCompressorClass(sstableCompressorClass), otherOptions), chunkLength, otherOptions);
-    }
-
-    private CompressionParameters(ICompressor sstableCompressor, Integer chunkLength, Map<String, String> otherOptions) throws ConfigurationException
-    {
-        this.sstableCompressor = sstableCompressor;
-        this.chunkLength = chunkLength;
-        this.otherOptions = ImmutableMap.copyOf(otherOptions);
-        String chance = otherOptions.get(CRC_CHECK_CHANCE);
-        this.crcCheckChance = (chance == null) ? DEFAULT_CRC_CHECK_CHANCE : parseCrcCheckChance(chance);
-    }
-
-    public CompressionParameters copy()
-    {
-        return new CompressionParameters(sstableCompressor, chunkLength, otherOptions);
-    }
-
-    public void setLiveMetadata(final CFMetaData liveMetadata)
-    {
-        if (liveMetadata == null)
-            return;
-
-        this.liveMetadata = liveMetadata;
-    }
-
-    public void setCrcCheckChance(double crcCheckChance) throws ConfigurationException
-    {
-        validateCrcCheckChance(crcCheckChance);
-        this.crcCheckChance = crcCheckChance;
-
-        if (liveMetadata != null && this != liveMetadata.compressionParameters)
-            liveMetadata.compressionParameters.setCrcCheckChance(crcCheckChance);
-    }
-
-    /**
-     * Checks if compression is enabled.
-     * @return <code>true</code> if compression is enabled, <code>false</code> otherwise.
-     */
-    public boolean isEnabled()
-    {
-        return sstableCompressor != null;
-    }
-
-    /**
-     * Returns the SSTable compressor.
-     * @return the SSTable compressor or <code>null</code> if compression is disabled.
-     */
-    public ICompressor getSstableCompressor()
-    {
-        return sstableCompressor;
-    }
-
-    public ImmutableMap<String, String> getOtherOptions()
-    {
-        return otherOptions;
-    }
-
-    public double getCrcCheckChance()
-    {
-        return liveMetadata == null ? this.crcCheckChance : liveMetadata.compressionParameters.crcCheckChance;
-    }
-
-    private static double parseCrcCheckChance(String crcCheckChance) throws ConfigurationException
-    {
-        try
-        {
-            double chance = Double.parseDouble(crcCheckChance);
-            validateCrcCheckChance(chance);
-            return chance;
-        }
-        catch (NumberFormatException e)
-        {
-            throw new ConfigurationException("crc_check_chance should be a double");
-        }
-    }
-
-    private static void validateCrcCheckChance(double crcCheckChance) throws ConfigurationException
-    {
-        if (crcCheckChance < 0.0d || crcCheckChance > 1.0d)
-            throw new ConfigurationException("crc_check_chance should be between 0.0 and 1.0");
-    }
-
-    public int chunkLength()
-    {
-        return chunkLength == null ? DEFAULT_CHUNK_LENGTH : chunkLength;
-    }
-
-    private static Class<?> parseCompressorClass(String className) throws ConfigurationException
-    {
-        if (className == null || className.isEmpty())
-            return null;
-
-        className = className.contains(".") ? className : "org.apache.cassandra.io.compress." + className;
-        try
-        {
-            return Class.forName(className);
-        }
-        catch (Exception e)
-        {
-            throw new ConfigurationException("Could not create Compression for type " + className, e);
-        }
-    }
-
-    private static ICompressor createCompressor(Class<?> compressorClass, Map<String, String> compressionOptions) throws ConfigurationException
-    {
-        if (compressorClass == null)
-        {
-            if (!compressionOptions.isEmpty())
-                throw new ConfigurationException("Unknown compression options (" + compressionOptions.keySet() + ") since no compression class found");
-            return null;
-        }
-
-        try
-        {
-            Method method = compressorClass.getMethod("create", Map.class);
-            ICompressor compressor = (ICompressor)method.invoke(null, compressionOptions);
-            // Check for unknown options
-            AbstractSet<String> supportedOpts = Sets.union(compressor.supportedOptions(), GLOBAL_OPTIONS);
-            for (String provided : compressionOptions.keySet())
-                if (!supportedOpts.contains(provided))
-                    throw new ConfigurationException("Unknown compression options " + provided);
-            return compressor;
-        }
-        catch (NoSuchMethodException e)
-        {
-            throw new ConfigurationException("create method not found", e);
-        }
-        catch (SecurityException e)
-        {
-            throw new ConfigurationException("Access forbiden", e);
-        }
-        catch (IllegalAccessException e)
-        {
-            throw new ConfigurationException("Cannot access method create in " + compressorClass.getName(), e);
-        }
-        catch (InvocationTargetException e)
-        {
-            Throwable cause = e.getCause();
-            throw new ConfigurationException(String.format("%s.create() threw an error: %s",
-                                             compressorClass.getSimpleName(),
-                                             cause == null ? e.getClass().getName() + " " + e.getMessage() : cause.getClass().getName() + " " + cause.getMessage()),
-                                             e);
-        }
-        catch (ExceptionInInitializerError e)
-        {
-            throw new ConfigurationException("Cannot initialize class " + compressorClass.getName());
-        }
-    }
-
-    public static ICompressor createCompressor(ParameterizedClass compression) throws ConfigurationException {
-        return createCompressor(parseCompressorClass(compression.class_name), copyOptions(compression.parameters));
-    }
-
-    private static Map<String, String> copyOptions(Map<? extends CharSequence, ? extends CharSequence> co)
-    {
-        if (co == null || co.isEmpty())
-            return Collections.<String, String>emptyMap();
-
-        Map<String, String> compressionOptions = new HashMap<String, String>();
-        for (Map.Entry<? extends CharSequence, ? extends CharSequence> entry : co.entrySet())
-        {
-            compressionOptions.put(entry.getKey().toString(), entry.getValue().toString());
-        }
-        return compressionOptions;
-    }
-
-    /**
-     * Parse the chunk length (in KB) and returns it as bytes.
-     * 
-     * @param chLengthKB the length of the chunk to parse
-     * @return the chunk length in bytes
-     * @throws ConfigurationException if the chunk size is too large
-     */
-    private static Integer parseChunkLength(String chLengthKB) throws ConfigurationException
-    {
-        if (chLengthKB == null)
-            return null;
-
-        try
-        {
-            int parsed = Integer.parseInt(chLengthKB);
-            if (parsed > Integer.MAX_VALUE / 1024)
-                throw new ConfigurationException("Value of " + CHUNK_LENGTH_IN_KB + " is too large (" + parsed + ")");
-            return 1024 * parsed;
-        }
-        catch (NumberFormatException e)
-        {
-            throw new ConfigurationException("Invalid value for " + CHUNK_LENGTH_IN_KB, e);
-        }
-    }
-
-    /**
-     * Removes the chunk length option from the specified set of option.
-     *
-     * @param options the options
-     * @return the chunk length value
-     */
-    private static Integer removeChunkLength(Map<String, String> options)
-    {
-        if (options.containsKey(CHUNK_LENGTH_IN_KB))
-        {
-            if (options.containsKey(CHUNK_LENGTH_KB))
-            {
-                throw new ConfigurationException(String.format("The '%s' option must not be used if the chunk length is already specified by the '%s' option",
-                                                               CHUNK_LENGTH_KB,
-                                                               CHUNK_LENGTH_IN_KB));
-            }
-
-            return parseChunkLength(options.remove(CHUNK_LENGTH_IN_KB));
-        }
-
-        if (options.containsKey(CHUNK_LENGTH_KB))
-        {
-            if (options.containsKey(CHUNK_LENGTH_KB) && !hasLoggedChunkLengthWarning)
-            {
-                hasLoggedChunkLengthWarning = true;
-                LOGGER.warn(String.format("The %s option has been deprecated. You should use %s instead",
-                                          CHUNK_LENGTH_KB,
-                                          CHUNK_LENGTH_IN_KB));
-            }
-
-            return parseChunkLength(options.remove(CHUNK_LENGTH_KB));
-        }
-
-        return null;
-    }
-
-    /**
-     * Returns <code>true</code> if the specified options contains the name of the compression class to be used,
-     * <code>false</code> otherwise.
-     *
-     * @param options the options
-     * @return <code>true</code> if the specified options contains the name of the compression class to be used,
-     * <code>false</code> otherwise.
-     */
-    public static boolean containsSstableCompressionClass(Map<String, String> options)
-    {
-        return options.containsKey(CLASS)
-                || options.containsKey(SSTABLE_COMPRESSION);
-    }
-
-    /**
-     * Removes the option specifying the name of the compression class
-     *
-     * @param options the options
-     * @return the name of the compression class
-     */
-    private static String removeSstableCompressionClass(Map<String, String> options)
-    {
-        if (options.containsKey(CLASS))
-        {
-            if (options.containsKey(SSTABLE_COMPRESSION))
-                throw new ConfigurationException(String.format("The '%s' option must not be used if the compression algorithm is already specified by the '%s' option",
-                                                               SSTABLE_COMPRESSION,
-                                                               CLASS));
-
-            String clazz = options.remove(CLASS);
-            if (clazz.isEmpty())
-                throw new ConfigurationException(String.format("The '%s' option must not be empty. To disable compression use 'enabled' : false", CLASS));
-
-            return clazz;
-        }
-
-        if (options.containsKey(SSTABLE_COMPRESSION) && !hasLoggedSsTableCompressionWarning)
-        {
-            hasLoggedSsTableCompressionWarning = true;
-            LOGGER.warn(String.format("The %s option has been deprecated. You should use %s instead",
-                                      SSTABLE_COMPRESSION,
-                                      CLASS));
-        }
-
-        return options.remove(SSTABLE_COMPRESSION);
-    }
-
-    /**
-     * Returns <code>true</code> if the options contains the <code>enabled</code> option and that its value is
-     * <code>true</code>, otherwise returns <code>false</code>.
-     *
-     * @param options the options
-     * @return <code>true</code> if the options contains the <code>enabled</code> option and that its value is
-     * <code>true</code>, otherwise returns <code>false</code>.
-     */
-    public static boolean isEnabled(Map<String, String> options)
-    {
-        String enabled = options.get(ENABLED);
-        return enabled == null || Boolean.parseBoolean(enabled);
-    }
-
-    /**
-     * Removes the <code>enabled</code> option from the specified options.
-     *
-     * @param options the options
-     * @return the value of the <code>enabled</code> option
-     */
-    private static boolean removeEnabled(Map<String, String> options)
-    {
-        String enabled = options.remove(ENABLED);
-        return enabled == null || Boolean.parseBoolean(enabled);
-    }
-
-    // chunkLength must be a power of 2 because we assume so when
-    // computing the chunk number from an uncompressed file offset (see
-    // CompressedRandomAccessReader.decompresseChunk())
-    public void validate() throws ConfigurationException
-    {
-        // if chunk length was not set (chunkLength == null), this is fine, default will be used
-        if (chunkLength != null)
-        {
-            if (chunkLength <= 0)
-                throw new ConfigurationException("Invalid negative or null " + CHUNK_LENGTH_IN_KB);
-
-            int c = chunkLength;
-            boolean found = false;
-            while (c != 0)
-            {
-                if ((c & 0x01) != 0)
-                {
-                    if (found)
-                        throw new ConfigurationException(CHUNK_LENGTH_IN_KB + " must be a power of 2");
-                    else
-                        found = true;
-                }
-                c >>= 1;
-            }
-        }
-
-        validateCrcCheckChance(crcCheckChance);
-    }
-
-    public Map<String, String> asMap()
-    {
-        if (!isEnabled())
-            return Collections.singletonMap(ENABLED, "false");
-
-        Map<String, String> options = new HashMap<String, String>(otherOptions);
-        options.put(CLASS, sstableCompressor.getClass().getName());
-        options.put(CHUNK_LENGTH_IN_KB, chunkLengthInKB());
-        return options;
-    }
-
-    public String chunkLengthInKB()
-    {
-        return String.valueOf(chunkLength() / 1024);
-    }
-
-    @Override
-    public boolean equals(Object obj)
-    {
-        if (obj == this)
-        {
-            return true;
-        }
-        else if (obj == null || obj.getClass() != getClass())
-        {
-            return false;
-        }
-
-        CompressionParameters cp = (CompressionParameters) obj;
-        return new EqualsBuilder()
-            .append(sstableCompressor, cp.sstableCompressor)
-            .append(chunkLength(), cp.chunkLength())
-            .append(otherOptions, cp.otherOptions)
-            .isEquals();
-    }
-
-    @Override
-    public int hashCode()
-    {
-        return new HashCodeBuilder(29, 1597)
-            .append(sstableCompressor)
-            .append(chunkLength())
-            .append(otherOptions)
-            .toHashCode();
-    }
-
-    static class Serializer implements IVersionedSerializer<CompressionParameters>
-    {
-        public void serialize(CompressionParameters parameters, DataOutputPlus out, int version) throws IOException
-        {
-            out.writeUTF(parameters.sstableCompressor.getClass().getSimpleName());
-            out.writeInt(parameters.otherOptions.size());
-            for (Map.Entry<String, String> entry : parameters.otherOptions.entrySet())
-            {
-                out.writeUTF(entry.getKey());
-                out.writeUTF(entry.getValue());
-            }
-            out.writeInt(parameters.chunkLength());
-        }
-
-        public CompressionParameters deserialize(DataInputPlus in, int version) throws IOException
-        {
-            String compressorName = in.readUTF();
-            int optionCount = in.readInt();
-            Map<String, String> options = new HashMap<String, String>();
-            for (int i = 0; i < optionCount; ++i)
-            {
-                String key = in.readUTF();
-                String value = in.readUTF();
-                options.put(key, value);
-            }
-            int chunkLength = in.readInt();
-            CompressionParameters parameters;
-            try
-            {
-                parameters = new CompressionParameters(compressorName, chunkLength, options);
-            }
-            catch (ConfigurationException e)
-            {
-                throw new RuntimeException("Cannot create CompressionParameters for parameters", e);
-            }
-            return parameters;
-        }
-
-        public long serializedSize(CompressionParameters parameters, int version)
-        {
-            long size = TypeSizes.sizeof(parameters.sstableCompressor.getClass().getSimpleName());
-            size += TypeSizes.sizeof(parameters.otherOptions.size());
-            for (Map.Entry<String, String> entry : parameters.otherOptions.entrySet())
-            {
-                size += TypeSizes.sizeof(entry.getKey());
-                size += TypeSizes.sizeof(entry.getValue());
-            }
-            size += TypeSizes.sizeof(parameters.chunkLength());
-            return size;
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/io/compress/LZ4Compressor.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/compress/LZ4Compressor.java b/src/java/org/apache/cassandra/io/compress/LZ4Compressor.java
index 5fd4309..069cc96 100644
--- a/src/java/org/apache/cassandra/io/compress/LZ4Compressor.java
+++ b/src/java/org/apache/cassandra/io/compress/LZ4Compressor.java
@@ -27,6 +27,7 @@ import java.util.Set;
 import com.google.common.annotations.VisibleForTesting;
 import net.jpountz.lz4.LZ4Exception;
 import net.jpountz.lz4.LZ4Factory;
+import org.apache.cassandra.schema.CompressionParams;
 
 public class LZ4Compressor implements ICompressor
 {
@@ -126,7 +127,7 @@ public class LZ4Compressor implements ICompressor
 
     public Set<String> supportedOptions()
     {
-        return new HashSet<>(Arrays.asList(CompressionParameters.CRC_CHECK_CHANCE));
+        return new HashSet<>(Arrays.asList(CompressionParams.CRC_CHECK_CHANCE));
     }
 
     public BufferType preferredBufferType()

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/io/sstable/IndexSummaryManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/IndexSummaryManager.java b/src/java/org/apache/cassandra/io/sstable/IndexSummaryManager.java
index 9b6ab6b..28b5964 100644
--- a/src/java/org/apache/cassandra/io/sstable/IndexSummaryManager.java
+++ b/src/java/org/apache/cassandra/io/sstable/IndexSummaryManager.java
@@ -332,8 +332,8 @@ public class IndexSummaryManager implements IndexSummaryManagerMBean
         long remainingSpace = memoryPoolCapacity;
         for (SSTableReader sstable : sstables)
         {
-            int minIndexInterval = sstable.metadata.getMinIndexInterval();
-            int maxIndexInterval = sstable.metadata.getMaxIndexInterval();
+            int minIndexInterval = sstable.metadata.params.minIndexInterval;
+            int maxIndexInterval = sstable.metadata.params.maxIndexInterval;
 
             double readsPerSec = sstable.getReadMeter() == null ? 0.0 : sstable.getReadMeter().fifteenMinuteRate();
             long idealSpace = Math.round(remainingSpace * (readsPerSec / totalReadsPerSec));

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
index 5ceced5..f13fbba 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
@@ -36,7 +36,6 @@ import org.slf4j.LoggerFactory;
 import com.clearspring.analytics.stream.cardinality.CardinalityMergeException;
 import com.clearspring.analytics.stream.cardinality.HyperLogLogPlus;
 import com.clearspring.analytics.stream.cardinality.ICardinality;
-import org.apache.cassandra.cache.CachingOptions;
 import org.apache.cassandra.cache.InstrumentingCache;
 import org.apache.cassandra.cache.KeyCacheKey;
 import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
@@ -56,6 +55,7 @@ import org.apache.cassandra.io.sstable.metadata.*;
 import org.apache.cassandra.io.util.*;
 import org.apache.cassandra.metrics.RestorableMeter;
 import org.apache.cassandra.metrics.StorageMetrics;
+import org.apache.cassandra.schema.CachingParams;
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.service.CacheService;
 import org.apache.cassandra.utils.*;
@@ -650,7 +650,7 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
 
     private void load(ValidationMetadata validation) throws IOException
     {
-        if (metadata.getBloomFilterFpChance() == 1.0)
+        if (metadata.params.bloomFilterFpChance == 1.0)
         {
             // bf is disabled.
             load(false, true);
@@ -667,7 +667,7 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
             // bf is enabled, but filter component is missing.
             load(true, true);
         }
-        else if (validation.bloomFilterFPChance != metadata.getBloomFilterFpChance())
+        else if (validation.bloomFilterFPChance != metadata.params.bloomFilterFpChance)
         {
             // bf fp chance in sstable metadata and it has changed since compaction.
             load(true, true);
@@ -789,9 +789,9 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
                     : estimateRowsFromIndex(primaryIndex); // statistics is supposed to be optional
 
             if (recreateBloomFilter)
-                bf = FilterFactory.getFilter(estimatedKeys, metadata.getBloomFilterFpChance(), true, descriptor.version.hasOldBfHashOrder());
+                bf = FilterFactory.getFilter(estimatedKeys, metadata.params.bloomFilterFpChance, true, descriptor.version.hasOldBfHashOrder());
 
-            try (IndexSummaryBuilder summaryBuilder = summaryLoaded ? null : new IndexSummaryBuilder(estimatedKeys, metadata.getMinIndexInterval(), samplingLevel))
+            try (IndexSummaryBuilder summaryBuilder = summaryLoaded ? null : new IndexSummaryBuilder(estimatedKeys, metadata.params.minIndexInterval, samplingLevel))
             {
                 long indexPosition;
                 RowIndexEntry.IndexSerializer rowIndexSerializer = descriptor.getFormat().getIndexSerializer(metadata, descriptor.version, header);
@@ -849,7 +849,7 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
             iStream = new DataInputStream(new FileInputStream(summariesFile));
             indexSummary = IndexSummary.serializer.deserialize(
                     iStream, getPartitioner(), descriptor.version.hasSamplingLevel(),
-                    metadata.getMinIndexInterval(), metadata.getMaxIndexInterval());
+                    metadata.params.minIndexInterval, metadata.params.maxIndexInterval);
             first = decorateKey(ByteBufferUtil.readWithLength(iStream));
             last = decorateKey(ByteBufferUtil.readWithLength(iStream));
             ibuilder.deserializeBounds(iStream);
@@ -1134,8 +1134,8 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
         {
             assert openReason != OpenReason.EARLY;
 
-            int minIndexInterval = metadata.getMinIndexInterval();
-            int maxIndexInterval = metadata.getMaxIndexInterval();
+            int minIndexInterval = metadata.params.minIndexInterval;
+            int maxIndexInterval = metadata.params.maxIndexInterval;
             double effectiveInterval = indexSummary.getEffectiveIndexInterval();
 
             IndexSummary newSummary;
@@ -1183,7 +1183,7 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
         try
         {
             long indexSize = primaryIndex.length();
-            try (IndexSummaryBuilder summaryBuilder = new IndexSummaryBuilder(estimatedKeys(), metadata.getMinIndexInterval(), newSamplingLevel))
+            try (IndexSummaryBuilder summaryBuilder = new IndexSummaryBuilder(estimatedKeys(), metadata.params.minIndexInterval, newSamplingLevel))
             {
                 long indexPosition;
                 while ((indexPosition = primaryIndex.getFilePointer()) != indexSize)
@@ -1515,14 +1515,10 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
 
     public void cacheKey(DecoratedKey key, RowIndexEntry info)
     {
-        CachingOptions caching = metadata.getCaching();
+        CachingParams caching = metadata.params.caching;
 
-        if (!caching.keyCache.isEnabled()
-                || keyCache == null
-                || keyCache.getCapacity() == 0)
-        {
+        if (!caching.cacheKeys() || keyCache == null || keyCache.getCapacity() == 0)
             return;
-        }
 
         KeyCacheKey cacheKey = new KeyCacheKey(metadata.cfId, descriptor, key.getKey());
         logger.trace("Adding cache entry for {} -> {}", cacheKey, info);
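
A minimal sketch of the simplified key-cache gate; the helper and its boolean/long parameters stand in for the keyCache field and capacity check that SSTableReader consults, while metadata.params.caching and CachingParams.cacheKeys() are taken from the patch:

    import org.apache.cassandra.config.CFMetaData;
    import org.apache.cassandra.schema.CachingParams;

    final class KeyCacheGateSketch
    {
        // Replaces the old CachingOptions check (caching.keyCache.isEnabled()).
        static boolean shouldCacheKey(CFMetaData metadata, boolean keyCachePresent, long keyCacheCapacity)
        {
            CachingParams caching = metadata.params.caching;
            return caching.cacheKeys() && keyCachePresent && keyCacheCapacity > 0;
        }
    }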

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/io/sstable/format/SSTableWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableWriter.java
index fa691b8..1a3f796 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/SSTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableWriter.java
@@ -133,10 +133,10 @@ public abstract class SSTableWriter extends SSTable implements Transactional
                 Component.TOC,
                 Component.DIGEST));
 
-        if (metadata.getBloomFilterFpChance() < 1.0)
+        if (metadata.params.bloomFilterFpChance < 1.0)
             components.add(Component.FILTER);
 
-        if (metadata.compressionParameters().isEnabled())
+        if (metadata.params.compression.isEnabled())
         {
             components.add(Component.COMPRESSION_INFO);
         }
@@ -251,7 +251,7 @@ public abstract class SSTableWriter extends SSTable implements Transactional
     protected Map<MetadataType, MetadataComponent> finalizeMetadata()
     {
         return metadataCollector.finalizeMetadata(getPartitioner().getClass().getCanonicalName(),
-                                                  metadata.getBloomFilterFpChance(),
+                                                  metadata.params.bloomFilterFpChance,
                                                   repairedAt,
                                                   header);
     }
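
A minimal sketch of the optional-component selection driven by the new TableParams fields; the helper class is invented, while Component.FILTER, Component.COMPRESSION_INFO and the two params accessors are taken from the patch:

    import java.util.HashSet;
    import java.util.Set;

    import org.apache.cassandra.config.CFMetaData;
    import org.apache.cassandra.io.sstable.Component;

    final class OptionalComponentsSketch
    {
        // A bloom filter component is only written when the FP chance is below 1.0,
        // and compression info only when the table's compression params are enabled.
        static Set<Component> optionalComponents(CFMetaData metadata)
        {
            Set<Component> components = new HashSet<>();
            if (metadata.params.bloomFilterFpChance < 1.0)
                components.add(Component.FILTER);
            if (metadata.params.compression.isEnabled())
                components.add(Component.COMPRESSION_INFO);
            return components;
        }
    }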

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java
index 5607a7e..2b60479 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/big/BigTableWriter.java
@@ -71,7 +71,7 @@ public class BigTableWriter extends SSTableWriter
         {
             dataFile = SequentialWriter.open(getFilename(),
                                              descriptor.filenameFor(Component.COMPRESSION_INFO),
-                                             metadata.compressionParameters(),
+                                             metadata.params.compression,
                                              metadataCollector);
             dbuilder = SegmentedFile.getCompressedBuilder((CompressedSequentialWriter) dataFile);
         }
@@ -376,8 +376,8 @@ public class BigTableWriter extends SSTableWriter
         {
             indexFile = SequentialWriter.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)));
             builder = SegmentedFile.getBuilder(DatabaseDescriptor.getIndexAccessMode(), false);
-            summary = new IndexSummaryBuilder(keyCount, metadata.getMinIndexInterval(), Downsampling.BASE_SAMPLING_LEVEL);
-            bf = FilterFactory.getFilter(keyCount, metadata.getBloomFilterFpChance(), true, descriptor.version.hasOldBfHashOrder());
+            summary = new IndexSummaryBuilder(keyCount, metadata.params.minIndexInterval, Downsampling.BASE_SAMPLING_LEVEL);
+            bf = FilterFactory.getFilter(keyCount, metadata.params.bloomFilterFpChance, true, descriptor.version.hasOldBfHashOrder());
             // register listeners to be alerted when the data files are flushed
             indexFile.setPostFlushListener(new Runnable()
             {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/io/util/SequentialWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/util/SequentialWriter.java b/src/java/org/apache/cassandra/io/util/SequentialWriter.java
index 90340ca..ddabe89 100644
--- a/src/java/org/apache/cassandra/io/util/SequentialWriter.java
+++ b/src/java/org/apache/cassandra/io/util/SequentialWriter.java
@@ -29,7 +29,7 @@ import org.apache.cassandra.io.FSReadError;
 import org.apache.cassandra.io.FSWriteError;
 import org.apache.cassandra.io.compress.BufferType;
 import org.apache.cassandra.io.compress.CompressedSequentialWriter;
-import org.apache.cassandra.io.compress.CompressionParameters;
+import org.apache.cassandra.schema.CompressionParams;
 import org.apache.cassandra.io.sstable.Descriptor;
 import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.utils.CLibrary;
@@ -168,7 +168,7 @@ public class SequentialWriter extends OutputStream implements WritableByteChanne
 
     public static CompressedSequentialWriter open(String dataFilePath,
                                                   String offsetsPath,
-                                                  CompressionParameters parameters,
+                                                  CompressionParams parameters,
                                                   MetadataCollector sstableMetadataCollector)
     {
         return new CompressedSequentialWriter(new File(dataFilePath), offsetsPath, parameters, sstableMetadataCollector);


[3/5] cassandra git commit: Factor out TableParams from CFMetaData

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/schema/CachingParams.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/schema/CachingParams.java b/src/java/org/apache/cassandra/schema/CachingParams.java
new file mode 100644
index 0000000..2b5ab7c
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/CachingParams.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.schema;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import com.google.common.base.Objects;
+import com.google.common.collect.ImmutableMap;
+import org.apache.commons.lang3.StringUtils;
+
+import org.apache.cassandra.exceptions.ConfigurationException;
+
+import static java.lang.String.format;
+
+// CQL: {'keys' : 'ALL'|'NONE', 'rows_per_partition': '200'|'NONE'|'ALL'}
+public final class CachingParams
+{
+    public enum Option
+    {
+        KEYS,
+        ROWS_PER_PARTITION;
+
+        @Override
+        public String toString()
+        {
+            return name().toLowerCase();
+        }
+    }
+
+    private static final String ALL = "ALL";
+    private static final String NONE = "NONE";
+
+    static final boolean DEFAULT_CACHE_KEYS = true;
+    static final int DEFAULT_ROWS_PER_PARTITION_TO_CACHE = 0;
+
+    public static final CachingParams CACHE_NOTHING = new CachingParams(false, 0);
+    public static final CachingParams CACHE_KEYS = new CachingParams(true, 0);
+    public static final CachingParams CACHE_EVERYTHING = new CachingParams(true, Integer.MAX_VALUE);
+
+    static final CachingParams DEFAULT = new CachingParams(DEFAULT_CACHE_KEYS, DEFAULT_ROWS_PER_PARTITION_TO_CACHE);
+
+    final boolean cacheKeys;
+    final int rowsPerPartitionToCache;
+
+    public CachingParams(boolean cacheKeys, int rowsPerPartitionToCache)
+    {
+        this.cacheKeys = cacheKeys;
+        this.rowsPerPartitionToCache = rowsPerPartitionToCache;
+    }
+
+    public boolean cacheKeys()
+    {
+        return cacheKeys;
+    }
+
+    public boolean cacheRows()
+    {
+        return rowsPerPartitionToCache > 0;
+    }
+
+    public boolean cacheAllRows()
+    {
+        return rowsPerPartitionToCache == Integer.MAX_VALUE;
+    }
+
+    public int rowsPerPartitionToCache()
+    {
+        return rowsPerPartitionToCache;
+    }
+
+    public static CachingParams fromMap(Map<String, String> map)
+    {
+        Map<String, String> copy = new HashMap<>(map);
+
+        String keys = copy.remove(Option.KEYS.toString());
+        boolean cacheKeys = keys != null && keysFromString(keys);
+
+        String rows = copy.remove(Option.ROWS_PER_PARTITION.toString());
+        int rowsPerPartitionToCache = rows == null
+                                    ? 0
+                                    : rowsPerPartitionFromString(rows);
+
+        if (!copy.isEmpty())
+        {
+            throw new ConfigurationException(format("Invalid caching sub-options %s: only '%s' and '%s' are allowed",
+                                                    copy.keySet(),
+                                                    Option.KEYS,
+                                                    Option.ROWS_PER_PARTITION));
+        }
+
+        return new CachingParams(cacheKeys, rowsPerPartitionToCache);
+    }
+
+    public Map<String, String> asMap()
+    {
+        return ImmutableMap.of(Option.KEYS.toString(),
+                               keysAsString(),
+                               Option.ROWS_PER_PARTITION.toString(),
+                               rowsPerPartitionAsString());
+    }
+
+    private static boolean keysFromString(String value)
+    {
+        if (value.equalsIgnoreCase(ALL))
+            return true;
+
+        if (value.equalsIgnoreCase(NONE))
+            return false;
+
+        throw new ConfigurationException(format("Invalid value '%s' for caching sub-option '%s': only '%s' and '%s' are allowed",
+                                                value,
+                                                Option.KEYS,
+                                                ALL,
+                                                NONE));
+    }
+
+    String keysAsString()
+    {
+        return cacheKeys ? ALL : NONE;
+    }
+
+    private static int rowsPerPartitionFromString(String value)
+    {
+        if (value.equalsIgnoreCase(ALL))
+            return Integer.MAX_VALUE;
+
+        if (value.equalsIgnoreCase(NONE))
+            return 0;
+
+        if (StringUtils.isNumeric(value))
+            return Integer.parseInt(value);
+
+        throw new ConfigurationException(format("Invalid value '%s' for caching sub-option '%s':"
+                                                + " only '%s', '%s', and integer values are allowed",
+                                                value,
+                                                Option.ROWS_PER_PARTITION,
+                                                ALL,
+                                                NONE));
+    }
+
+    String rowsPerPartitionAsString()
+    {
+        if (rowsPerPartitionToCache == 0)
+            return NONE;
+        else if (rowsPerPartitionToCache == Integer.MAX_VALUE)
+            return ALL;
+        else
+            return Integer.toString(rowsPerPartitionToCache);
+    }
+
+    @Override
+    public String toString()
+    {
+        return format("{'%s' : '%s', '%s' : '%s'}",
+                      Option.KEYS,
+                      keysAsString(),
+                      Option.ROWS_PER_PARTITION,
+                      rowsPerPartitionAsString());
+    }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o)
+            return true;
+
+        if (!(o instanceof CachingParams))
+            return false;
+
+        CachingParams c = (CachingParams) o;
+
+        return cacheKeys == c.cacheKeys && rowsPerPartitionToCache == c.rowsPerPartitionToCache;
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hashCode(cacheKeys, rowsPerPartitionToCache);
+    }
+}
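
For illustration (not part of the diff), a minimal sketch of the CachingParams round-trip for the CQL form {'keys' : 'ALL', 'rows_per_partition' : '200'}; the class name and values here are arbitrary:

    import java.util.Map;

    import com.google.common.collect.ImmutableMap;

    import org.apache.cassandra.schema.CachingParams;

    public class CachingParamsSketch
    {
        public static void main(String[] args)
        {
            // Parse the CQL-style map into typed params
            CachingParams params = CachingParams.fromMap(ImmutableMap.of("keys", "ALL",
                                                                         "rows_per_partition", "200"));
            assert params.cacheKeys();
            assert params.rowsPerPartitionToCache() == 200;

            // Serialize back, as done for schema tables and DESCRIBE output
            Map<String, String> map = params.asMap();  // {keys=ALL, rows_per_partition=200}
            System.out.println(params);                // {'keys' : 'ALL', 'rows_per_partition' : '200'}
        }
    }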

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/schema/CompactionParams.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/schema/CompactionParams.java b/src/java/org/apache/cassandra/schema/CompactionParams.java
new file mode 100644
index 0000000..720efa3
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/CompactionParams.java
@@ -0,0 +1,304 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.schema;
+
+import java.lang.reflect.InvocationTargetException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+import com.google.common.base.MoreObjects;
+import com.google.common.collect.ImmutableMap;
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.db.compaction.AbstractCompactionStrategy;
+import org.apache.cassandra.db.compaction.LeveledCompactionStrategy;
+import org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy;
+import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.utils.FBUtilities;
+
+import static java.lang.String.format;
+
+public final class CompactionParams
+{
+    private static final Logger logger = LoggerFactory.getLogger(CompactionParams.class);
+
+    public enum Option
+    {
+        CLASS,
+        ENABLED,
+        MIN_THRESHOLD,
+        MAX_THRESHOLD;
+
+        @Override
+        public String toString()
+        {
+            return name().toLowerCase();
+        }
+    }
+
+    public static final int DEFAULT_MIN_THRESHOLD = 4;
+    public static final int DEFAULT_MAX_THRESHOLD = 32;
+
+    public static final boolean DEFAULT_ENABLED = true;
+
+    public static final Map<String, String> DEFAULT_THRESHOLDS =
+        ImmutableMap.of(Option.MIN_THRESHOLD.toString(), Integer.toString(DEFAULT_MIN_THRESHOLD),
+                        Option.MAX_THRESHOLD.toString(), Integer.toString(DEFAULT_MAX_THRESHOLD));
+
+    public static final CompactionParams DEFAULT =
+        new CompactionParams(SizeTieredCompactionStrategy.class, DEFAULT_THRESHOLDS, DEFAULT_ENABLED);
+
+    private final Class<? extends AbstractCompactionStrategy> klass;
+    private final ImmutableMap<String, String> options;
+    private final boolean isEnabled;
+
+    private CompactionParams(Class<? extends AbstractCompactionStrategy> klass, Map<String, String> options, boolean isEnabled)
+    {
+        this.klass = klass;
+        this.options = ImmutableMap.copyOf(options);
+        this.isEnabled = isEnabled;
+    }
+
+    public static CompactionParams create(Class<? extends AbstractCompactionStrategy> klass, Map<String, String> options)
+    {
+        boolean isEnabled = options.containsKey(Option.ENABLED.toString())
+                          ? Boolean.parseBoolean(options.get(Option.ENABLED.toString()))
+                          : DEFAULT_ENABLED;
+
+        Map<String, String> allOptions = new HashMap<>(options);
+        if (supportsThresholdParams(klass))
+        {
+            allOptions.putIfAbsent(Option.MIN_THRESHOLD.toString(), Integer.toString(DEFAULT_MIN_THRESHOLD));
+            allOptions.putIfAbsent(Option.MAX_THRESHOLD.toString(), Integer.toString(DEFAULT_MAX_THRESHOLD));
+        }
+
+        return new CompactionParams(klass, allOptions, isEnabled);
+    }
+
+    public static CompactionParams scts(Map<String, String> options)
+    {
+        return create(SizeTieredCompactionStrategy.class, options);
+    }
+
+    public static CompactionParams lcs(Map<String, String> options)
+    {
+        return create(LeveledCompactionStrategy.class, options);
+    }
+
+    public int minCompactionThreshold()
+    {
+        String threshold = options.get(Option.MIN_THRESHOLD.toString());
+        return threshold == null
+             ? DEFAULT_MIN_THRESHOLD
+             : Integer.parseInt(threshold);
+    }
+
+    public int maxCompactionThreshold()
+    {
+        String threshold = options.get(Option.MAX_THRESHOLD.toString());
+        return threshold == null
+             ? DEFAULT_MAX_THRESHOLD
+             : Integer.parseInt(threshold);
+    }
+
+    public void validate()
+    {
+        try
+        {
+            Map<?, ?> unknownOptions = (Map) klass.getMethod("validateOptions", Map.class).invoke(null, options);
+            if (!unknownOptions.isEmpty())
+            {
+                throw new ConfigurationException(format("Properties specified %s are not understood by %s",
+                                                        unknownOptions.keySet(),
+                                                        klass.getSimpleName()));
+            }
+        }
+        catch (NoSuchMethodException e)
+        {
+            logger.warn("Compaction strategy {} does not have a static validateOptions method. Validation ignored",
+                        klass.getName());
+        }
+        catch (InvocationTargetException e)
+        {
+            if (e.getTargetException() instanceof ConfigurationException)
+                throw (ConfigurationException) e.getTargetException();
+
+            Throwable cause = e.getCause() == null
+                            ? e
+                            : e.getCause();
+
+            throw new ConfigurationException(format("%s.validateOptions() threw an error: %s %s",
+                                                    klass.getName(),
+                                                    cause.getClass().getName(),
+                                                    cause.getMessage()),
+                                             e);
+        }
+        catch (IllegalAccessException e)
+        {
+            throw new ConfigurationException("Cannot access method validateOptions in " + klass.getName(), e);
+        }
+
+        String minThreshold = options.get(Option.MIN_THRESHOLD.toString());
+        if (minThreshold != null && !StringUtils.isNumeric(minThreshold))
+        {
+            throw new ConfigurationException(format("Invalid value %s for '%s' compaction sub-option - must be an integer",
+                                                    minThreshold,
+                                                    Option.MIN_THRESHOLD));
+        }
+
+        String maxThreshold = options.get(Option.MAX_THRESHOLD.toString());
+        if (maxThreshold != null && !StringUtils.isNumeric(maxThreshold))
+        {
+            throw new ConfigurationException(format("Invalid value %s for '%s' compaction sub-option - must be an integer",
+                                                    maxThreshold,
+                                                    Option.MAX_THRESHOLD));
+        }
+
+        if (minCompactionThreshold() <= 0 || maxCompactionThreshold() <= 0)
+        {
+            throw new ConfigurationException("Disabling compaction by setting compaction thresholds to 0 has been removed,"
+                                             + " set the compaction option 'enabled' to false instead.");
+        }
+
+        if (minCompactionThreshold() <= 1)
+        {
+            throw new ConfigurationException(format("Min compaction threshold cannot be less than 2 (got %d)",
+                                                    minCompactionThreshold()));
+        }
+
+        if (minCompactionThreshold() > maxCompactionThreshold())
+        {
+            throw new ConfigurationException(format("Min compaction threshold (got %d) cannot be greater than max compaction threshold (got %d)",
+                                                    minCompactionThreshold(),
+                                                    maxCompactionThreshold()));
+        }
+    }
+
+    double defaultBloomFilterFbChance()
+    {
+        return klass.equals(LeveledCompactionStrategy.class) ? 0.1 : 0.01;
+    }
+
+    public Class<? extends AbstractCompactionStrategy> klass()
+    {
+        return klass;
+    }
+
+    /**
+     * All strategy options - excluding 'class'.
+     */
+    public Map<String, String> options()
+    {
+        return options;
+    }
+
+    public boolean isEnabled()
+    {
+        return isEnabled;
+    }
+
+    public static CompactionParams fromMap(Map<String, String> map)
+    {
+        Map<String, String> options = new HashMap<>(map);
+
+        String className = options.remove(Option.CLASS.toString());
+        if (className == null)
+        {
+            throw new ConfigurationException(format("Missing sub-option '%s' for the '%s' option",
+                                                    Option.CLASS,
+                                                    TableParams.Option.COMPACTION));
+        }
+
+        return create(classFromName(className), options);
+    }
+
+    private static Class<? extends AbstractCompactionStrategy> classFromName(String name)
+    {
+        String className = name.contains(".")
+                         ? name
+                         : "org.apache.cassandra.db.compaction." + name;
+        Class<AbstractCompactionStrategy> strategyClass = FBUtilities.classForName(className, "compaction strategy");
+
+        if (!AbstractCompactionStrategy.class.isAssignableFrom(strategyClass))
+        {
+            throw new ConfigurationException(format("Compaction strategy class %s is not derived from AbstractReplicationStrategy",
+                                                    className));
+        }
+
+        return strategyClass;
+    }
+
+    /*
+     * Whether the strategy supports the min/max threshold sub-options (LCS doesn't, STCS and DTCS do).
+     */
+    @SuppressWarnings("unchecked")
+    public static boolean supportsThresholdParams(Class<? extends AbstractCompactionStrategy> klass)
+    {
+        try
+        {
+            Map<String, String> unrecognizedOptions =
+                (Map<String, String>) klass.getMethod("validateOptions", Map.class)
+                                           .invoke(null, DEFAULT_THRESHOLDS);
+
+            return unrecognizedOptions.isEmpty();
+        }
+        catch (Exception e)
+        {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public Map<String, String> asMap()
+    {
+        Map<String, String> map = new HashMap<>(options());
+        map.put(Option.CLASS.toString(), klass.getName());
+        return map;
+    }
+
+    @Override
+    public String toString()
+    {
+        return MoreObjects.toStringHelper(this)
+                          .add("class", klass.getName())
+                          .add("options", options)
+                          .toString();
+    }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o)
+            return true;
+
+        if (!(o instanceof CompactionParams))
+            return false;
+
+        CompactionParams cp = (CompactionParams) o;
+
+        return klass.equals(cp.klass) && options.equals(cp.options);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hash(klass, options);
+    }
+}
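
For illustration (not part of the diff), a sketch of building CompactionParams from a CQL compaction map; note that min/max thresholds are now plain sub-options of the compaction map. The class name and threshold value below are arbitrary:

    import java.util.Map;

    import com.google.common.collect.ImmutableMap;

    import org.apache.cassandra.schema.CompactionParams;

    public class CompactionParamsSketch
    {
        public static void main(String[] args)
        {
            // Equivalent of: compaction = {'class' : 'SizeTieredCompactionStrategy', 'min_threshold' : '6'}
            Map<String, String> cql = ImmutableMap.of("class", "SizeTieredCompactionStrategy",
                                                      "min_threshold", "6");
            CompactionParams params = CompactionParams.fromMap(cql);
            params.validate();

            assert params.minCompactionThreshold() == 6;  // explicit sub-option
            assert params.maxCompactionThreshold() == 32; // filled in with the STCS default
            assert params.isEnabled();
        }
    }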

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/schema/CompressionParams.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/schema/CompressionParams.java b/src/java/org/apache/cassandra/schema/CompressionParams.java
new file mode 100644
index 0000000..a73fcd1
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/CompressionParams.java
@@ -0,0 +1,579 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.schema;
+
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.*;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.slf4j.LoggerFactory;
+import org.slf4j.Logger;
+
+import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.config.ParameterizedClass;
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.io.IVersionedSerializer;
+import org.apache.cassandra.io.compress.*;
+import org.apache.cassandra.io.util.DataInputPlus;
+import org.apache.cassandra.io.util.DataOutputPlus;
+
+import static java.lang.String.format;
+
+@SuppressWarnings("deprecation")
+public final class CompressionParams
+{
+    private static final Logger logger = LoggerFactory.getLogger(CompressionParams.class);
+
+    private static volatile boolean hasLoggedSsTableCompressionWarning;
+    private static volatile boolean hasLoggedChunkLengthWarning;
+
+    public static final int DEFAULT_CHUNK_LENGTH = 65536;
+    public static final double DEFAULT_CRC_CHECK_CHANCE = 1.0;
+    public static final IVersionedSerializer<CompressionParams> serializer = new Serializer();
+
+    public static final String CLASS = "class";
+    public static final String CHUNK_LENGTH_IN_KB = "chunk_length_in_kb";
+    public static final String ENABLED = "enabled";
+
+    public static final CompressionParams DEFAULT = new CompressionParams(LZ4Compressor.instance,
+                                                                          DEFAULT_CHUNK_LENGTH,
+                                                                          Collections.emptyMap());
+
+    @Deprecated public static final String SSTABLE_COMPRESSION = "sstable_compression";
+    @Deprecated public static final String CHUNK_LENGTH_KB = "chunk_length_kb";
+
+    public static final String CRC_CHECK_CHANCE = "crc_check_chance";
+
+    public static final Set<String> GLOBAL_OPTIONS = ImmutableSet.of(CRC_CHECK_CHANCE);
+
+    private final ICompressor sstableCompressor;
+    private final Integer chunkLength;
+    private volatile double crcCheckChance;
+    private final ImmutableMap<String, String> otherOptions; // Unrecognized options, can be used by the compressor
+    private CFMetaData liveMetadata;
+
+    public static CompressionParams fromMap(Map<String, String> opts)
+    {
+        Map<String, String> options = copyOptions(opts);
+
+        String sstableCompressionClass;
+
+        if (!opts.isEmpty() && isEnabled(opts) && !containsSstableCompressionClass(opts))
+            throw new ConfigurationException(format("Missing sub-option '%s' for the 'compression' option.", CLASS));
+
+        if (!removeEnabled(options))
+        {
+            sstableCompressionClass = null;
+
+            if (!options.isEmpty())
+                throw new ConfigurationException(format("If the '%s' option is set to false no other options must be specified", ENABLED));
+        }
+        else
+        {
+            sstableCompressionClass = removeSstableCompressionClass(options);
+        }
+
+        Integer chunkLength = removeChunkLength(options);
+
+        CompressionParams cp = new CompressionParams(sstableCompressionClass, chunkLength, options);
+        cp.validate();
+
+        return cp;
+    }
+
+    public Class<? extends ICompressor> klass()
+    {
+        return sstableCompressor.getClass();
+    }
+
+    public static CompressionParams noCompression()
+    {
+        return new CompressionParams((ICompressor) null, DEFAULT_CHUNK_LENGTH, Collections.emptyMap());
+    }
+
+    public static CompressionParams snappy()
+    {
+        return snappy(null);
+    }
+
+    public static CompressionParams snappy(Integer chunkLength)
+    {
+        return new CompressionParams(SnappyCompressor.instance, chunkLength, Collections.emptyMap());
+    }
+
+    public static CompressionParams deflate()
+    {
+        return deflate(null);
+    }
+
+    public static CompressionParams deflate(Integer chunkLength)
+    {
+        return new CompressionParams(DeflateCompressor.instance, chunkLength, Collections.emptyMap());
+    }
+
+    public static CompressionParams lz4()
+    {
+        return lz4(null);
+    }
+
+    public static CompressionParams lz4(Integer chunkLength)
+    {
+        return new CompressionParams(LZ4Compressor.instance, chunkLength, Collections.emptyMap());
+    }
+
+    public CompressionParams(String sstableCompressorClass, Integer chunkLength, Map<String, String> otherOptions) throws ConfigurationException
+    {
+        this(createCompressor(parseCompressorClass(sstableCompressorClass), otherOptions), chunkLength, otherOptions);
+    }
+
+    private CompressionParams(ICompressor sstableCompressor, Integer chunkLength, Map<String, String> otherOptions) throws ConfigurationException
+    {
+        this.sstableCompressor = sstableCompressor;
+        this.chunkLength = chunkLength;
+        this.otherOptions = ImmutableMap.copyOf(otherOptions);
+        String chance = otherOptions.get(CRC_CHECK_CHANCE);
+        this.crcCheckChance = (chance == null) ? DEFAULT_CRC_CHECK_CHANCE : parseCrcCheckChance(chance);
+    }
+
+    public CompressionParams copy()
+    {
+        return new CompressionParams(sstableCompressor, chunkLength, otherOptions);
+    }
+
+    public void setLiveMetadata(final CFMetaData liveMetadata)
+    {
+        if (liveMetadata == null)
+            return;
+
+        this.liveMetadata = liveMetadata;
+    }
+
+    public void setCrcCheckChance(double crcCheckChance) throws ConfigurationException
+    {
+        validateCrcCheckChance(crcCheckChance);
+        this.crcCheckChance = crcCheckChance;
+
+        if (liveMetadata != null && this != liveMetadata.params.compression)
+            liveMetadata.params.compression.setCrcCheckChance(crcCheckChance);
+    }
+
+    /**
+     * Checks if compression is enabled.
+     * @return {@code true} if compression is enabled, {@code false} otherwise.
+     */
+    public boolean isEnabled()
+    {
+        return sstableCompressor != null;
+    }
+
+    /**
+     * Returns the SSTable compressor.
+     * @return the SSTable compressor or {@code null} if compression is disabled.
+     */
+    public ICompressor getSstableCompressor()
+    {
+        return sstableCompressor;
+    }
+
+    public ImmutableMap<String, String> getOtherOptions()
+    {
+        return otherOptions;
+    }
+
+    public double getCrcCheckChance()
+    {
+        return liveMetadata == null ? this.crcCheckChance : liveMetadata.params.compression.crcCheckChance;
+    }
+
+    private static double parseCrcCheckChance(String crcCheckChance) throws ConfigurationException
+    {
+        try
+        {
+            double chance = Double.parseDouble(crcCheckChance);
+            validateCrcCheckChance(chance);
+            return chance;
+        }
+        catch (NumberFormatException e)
+        {
+            throw new ConfigurationException("crc_check_chance should be a double");
+        }
+    }
+
+    private static void validateCrcCheckChance(double crcCheckChance) throws ConfigurationException
+    {
+        if (crcCheckChance < 0.0d || crcCheckChance > 1.0d)
+            throw new ConfigurationException("crc_check_chance should be between 0.0 and 1.0");
+    }
+
+    public int chunkLength()
+    {
+        return chunkLength == null ? DEFAULT_CHUNK_LENGTH : chunkLength;
+    }
+
+    private static Class<?> parseCompressorClass(String className) throws ConfigurationException
+    {
+        if (className == null || className.isEmpty())
+            return null;
+
+        className = className.contains(".") ? className : "org.apache.cassandra.io.compress." + className;
+        try
+        {
+            return Class.forName(className);
+        }
+        catch (Exception e)
+        {
+            throw new ConfigurationException("Could not create Compression for type " + className, e);
+        }
+    }
+
+    private static ICompressor createCompressor(Class<?> compressorClass, Map<String, String> compressionOptions) throws ConfigurationException
+    {
+        if (compressorClass == null)
+        {
+            if (!compressionOptions.isEmpty())
+                throw new ConfigurationException("Unknown compression options (" + compressionOptions.keySet() + ") since no compression class found");
+            return null;
+        }
+
+        try
+        {
+            Method method = compressorClass.getMethod("create", Map.class);
+            ICompressor compressor = (ICompressor)method.invoke(null, compressionOptions);
+            // Check for unknown options
+            AbstractSet<String> supportedOpts = Sets.union(compressor.supportedOptions(), GLOBAL_OPTIONS);
+            for (String provided : compressionOptions.keySet())
+                if (!supportedOpts.contains(provided))
+                    throw new ConfigurationException("Unknown compression options " + provided);
+            return compressor;
+        }
+        catch (NoSuchMethodException e)
+        {
+            throw new ConfigurationException("create method not found", e);
+        }
+        catch (SecurityException e)
+        {
+            throw new ConfigurationException("Access forbiden", e);
+        }
+        catch (IllegalAccessException e)
+        {
+            throw new ConfigurationException("Cannot access method create in " + compressorClass.getName(), e);
+        }
+        catch (InvocationTargetException e)
+        {
+            if (e.getTargetException() instanceof ConfigurationException)
+                throw (ConfigurationException) e.getTargetException();
+
+            Throwable cause = e.getCause() == null
+                            ? e
+                            : e.getCause();
+
+            throw new ConfigurationException(format("%s.create() threw an error: %s %s",
+                                                    compressorClass.getSimpleName(),
+                                                    cause.getClass().getName(),
+                                                    cause.getMessage()),
+                                             e);
+        }
+        catch (ExceptionInInitializerError e)
+        {
+            throw new ConfigurationException("Cannot initialize class " + compressorClass.getName());
+        }
+    }
+
+    public static ICompressor createCompressor(ParameterizedClass compression) throws ConfigurationException {
+        return createCompressor(parseCompressorClass(compression.class_name), copyOptions(compression.parameters));
+    }
+
+    private static Map<String, String> copyOptions(Map<? extends CharSequence, ? extends CharSequence> co)
+    {
+        if (co == null || co.isEmpty())
+            return Collections.<String, String>emptyMap();
+
+        Map<String, String> compressionOptions = new HashMap<>();
+        for (Map.Entry<? extends CharSequence, ? extends CharSequence> entry : co.entrySet())
+            compressionOptions.put(entry.getKey().toString(), entry.getValue().toString());
+        return compressionOptions;
+    }
+
+    /**
+     * Parses the chunk length (in KB) and returns it as bytes.
+     *
+     * @param chLengthKB the chunk length in KB, as a string
+     * @return the chunk length in bytes
+     * @throws ConfigurationException if the value is not a valid integer or is too large
+     */
+    private static Integer parseChunkLength(String chLengthKB) throws ConfigurationException
+    {
+        if (chLengthKB == null)
+            return null;
+
+        try
+        {
+            int parsed = Integer.parseInt(chLengthKB);
+            if (parsed > Integer.MAX_VALUE / 1024)
+                throw new ConfigurationException(format("Value of %s is too large (%s)", CHUNK_LENGTH_IN_KB,parsed));
+            return 1024 * parsed;
+        }
+        catch (NumberFormatException e)
+        {
+            throw new ConfigurationException("Invalid value for " + CHUNK_LENGTH_IN_KB, e);
+        }
+    }
+
+    /**
+     * Removes the chunk length option from the specified set of options.
+     *
+     * @param options the options
+     * @return the chunk length value
+     */
+    private static Integer removeChunkLength(Map<String, String> options)
+    {
+        if (options.containsKey(CHUNK_LENGTH_IN_KB))
+        {
+            if (options.containsKey(CHUNK_LENGTH_KB))
+            {
+                throw new ConfigurationException(format("The '%s' option must not be used if the chunk length is already specified by the '%s' option",
+                                                        CHUNK_LENGTH_KB,
+                                                        CHUNK_LENGTH_IN_KB));
+            }
+
+            return parseChunkLength(options.remove(CHUNK_LENGTH_IN_KB));
+        }
+
+        if (options.containsKey(CHUNK_LENGTH_KB))
+        {
+            if (!hasLoggedChunkLengthWarning)
+            {
+                hasLoggedChunkLengthWarning = true;
+                logger.warn(format("The %s option has been deprecated. You should use %s instead",
+                                   CHUNK_LENGTH_KB,
+                                   CHUNK_LENGTH_IN_KB));
+            }
+
+            return parseChunkLength(options.remove(CHUNK_LENGTH_KB));
+        }
+
+        return null;
+    }
+
+    /**
+     * Returns {@code true} if the specified options contains the name of the compression class to be used,
+     * {@code false} otherwise.
+     *
+     * @param options the options
+     * @return {@code true} if the specified options contains the name of the compression class to be used,
+     * {@code false} otherwise.
+     */
+    public static boolean containsSstableCompressionClass(Map<String, String> options)
+    {
+        return options.containsKey(CLASS) || options.containsKey(SSTABLE_COMPRESSION);
+    }
+
+    /**
+     * Removes the option specifying the name of the compression class
+     *
+     * @param options the options
+     * @return the name of the compression class
+     */
+    private static String removeSstableCompressionClass(Map<String, String> options)
+    {
+        if (options.containsKey(CLASS))
+        {
+            if (options.containsKey(SSTABLE_COMPRESSION))
+                throw new ConfigurationException(format("The '%s' option must not be used if the compression algorithm is already specified by the '%s' option",
+                                                        SSTABLE_COMPRESSION,
+                                                        CLASS));
+
+            String clazz = options.remove(CLASS);
+            if (clazz.isEmpty())
+                throw new ConfigurationException(format("The '%s' option must not be empty. To disable compression use 'enabled' : false", CLASS));
+
+            return clazz;
+        }
+
+        if (options.containsKey(SSTABLE_COMPRESSION) && !hasLoggedSsTableCompressionWarning)
+        {
+            hasLoggedSsTableCompressionWarning = true;
+            logger.warn(format("The %s option has been deprecated. You should use %s instead",
+                               SSTABLE_COMPRESSION,
+                               CLASS));
+        }
+
+        return options.remove(SSTABLE_COMPRESSION);
+    }
+
+    /**
+     * Returns {@code true} if the options contain the {@code enabled} option and its value is
+     * {@code true}, otherwise returns {@code false}.
+     *
+     * @param options the options
+     * @return {@code true} if the options contain the {@code enabled} option and its value is
+     * {@code true}, otherwise returns {@code false}.
+     */
+    public static boolean isEnabled(Map<String, String> options)
+    {
+        String enabled = options.get(ENABLED);
+        return enabled == null || Boolean.parseBoolean(enabled);
+    }
+
+    /**
+     * Removes the {@code enabled} option from the specified options.
+     *
+     * @param options the options
+     * @return the value of the {@code enabled} option
+     */
+    private static boolean removeEnabled(Map<String, String> options)
+    {
+        String enabled = options.remove(ENABLED);
+        return enabled == null || Boolean.parseBoolean(enabled);
+    }
+
+    // chunkLength must be a power of 2 because we assume so when
+    // computing the chunk number from an uncompressed file offset (see
+    // CompressedRandomAccessReader.decompressChunk())
+    public void validate() throws ConfigurationException
+    {
+        // if chunk length was not set (chunkLength == null), this is fine, default will be used
+        if (chunkLength != null)
+        {
+            if (chunkLength <= 0)
+                throw new ConfigurationException("Invalid negative or null " + CHUNK_LENGTH_IN_KB);
+
+            int c = chunkLength;
+            boolean found = false;
+            while (c != 0)
+            {
+                if ((c & 0x01) != 0)
+                {
+                    if (found)
+                        throw new ConfigurationException(CHUNK_LENGTH_IN_KB + " must be a power of 2");
+                    else
+                        found = true;
+                }
+                c >>= 1;
+            }
+        }
+
+        validateCrcCheckChance(crcCheckChance);
+    }
+
+    public Map<String, String> asMap()
+    {
+        if (!isEnabled())
+            return Collections.singletonMap(ENABLED, "false");
+
+        Map<String, String> options = new HashMap<>(otherOptions);
+        options.put(CLASS, sstableCompressor.getClass().getName());
+        options.put(CHUNK_LENGTH_IN_KB, chunkLengthInKB());
+
+        return options;
+    }
+
+    public String chunkLengthInKB()
+    {
+        return String.valueOf(chunkLength() / 1024);
+    }
+
+    @Override
+    public boolean equals(Object obj)
+    {
+        if (obj == this)
+        {
+            return true;
+        }
+        else if (obj == null || obj.getClass() != getClass())
+        {
+            return false;
+        }
+
+        CompressionParams cp = (CompressionParams) obj;
+        return new EqualsBuilder()
+            .append(sstableCompressor, cp.sstableCompressor)
+            .append(chunkLength(), cp.chunkLength())
+            .append(otherOptions, cp.otherOptions)
+            .isEquals();
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return new HashCodeBuilder(29, 1597)
+            .append(sstableCompressor)
+            .append(chunkLength())
+            .append(otherOptions)
+            .toHashCode();
+    }
+
+    static class Serializer implements IVersionedSerializer<CompressionParams>
+    {
+        public void serialize(CompressionParams parameters, DataOutputPlus out, int version) throws IOException
+        {
+            out.writeUTF(parameters.sstableCompressor.getClass().getSimpleName());
+            out.writeInt(parameters.otherOptions.size());
+            for (Map.Entry<String, String> entry : parameters.otherOptions.entrySet())
+            {
+                out.writeUTF(entry.getKey());
+                out.writeUTF(entry.getValue());
+            }
+            out.writeInt(parameters.chunkLength());
+        }
+
+        public CompressionParams deserialize(DataInputPlus in, int version) throws IOException
+        {
+            String compressorName = in.readUTF();
+            int optionCount = in.readInt();
+            Map<String, String> options = new HashMap<>();
+            for (int i = 0; i < optionCount; ++i)
+            {
+                String key = in.readUTF();
+                String value = in.readUTF();
+                options.put(key, value);
+            }
+            int chunkLength = in.readInt();
+            CompressionParams parameters;
+            try
+            {
+                parameters = new CompressionParams(compressorName, chunkLength, options);
+            }
+            catch (ConfigurationException e)
+            {
+                throw new RuntimeException("Cannot create CompressionParams for parameters", e);
+            }
+            return parameters;
+        }
+
+        public long serializedSize(CompressionParams parameters, int version)
+        {
+            long size = TypeSizes.sizeof(parameters.sstableCompressor.getClass().getSimpleName());
+            size += TypeSizes.sizeof(parameters.otherOptions.size());
+            for (Map.Entry<String, String> entry : parameters.otherOptions.entrySet())
+            {
+                size += TypeSizes.sizeof(entry.getKey());
+                size += TypeSizes.sizeof(entry.getValue());
+            }
+            size += TypeSizes.sizeof(parameters.chunkLength());
+            return size;
+        }
+    }
+}
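
For illustration (not part of the diff), a sketch of the map-based CompressionParams API that replaces CompressionParameters, using the new chunk_length_in_kb sub-option; the class name and values are arbitrary:

    import java.util.Map;

    import com.google.common.collect.ImmutableMap;

    import org.apache.cassandra.schema.CompressionParams;

    public class CompressionParamsSketch
    {
        public static void main(String[] args)
        {
            // Equivalent of: compression = {'class' : 'LZ4Compressor', 'chunk_length_in_kb' : '64'}
            CompressionParams lz4 = CompressionParams.fromMap(ImmutableMap.of("class", "LZ4Compressor",
                                                                              "chunk_length_in_kb", "64"));
            assert lz4.isEnabled();
            assert lz4.chunkLength() == 64 * 1024;  // stored internally in bytes

            // Disabling compression only requires the 'enabled' flag
            CompressionParams off = CompressionParams.fromMap(ImmutableMap.of("enabled", "false"));
            assert !off.isEnabled();
            Map<String, String> map = off.asMap();  // {enabled=false}
        }
    }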

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/schema/KeyspaceParams.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/schema/KeyspaceParams.java b/src/java/org/apache/cassandra/schema/KeyspaceParams.java
index a8de2bd..6cdf27f 100644
--- a/src/java/org/apache/cassandra/schema/KeyspaceParams.java
+++ b/src/java/org/apache/cassandra/schema/KeyspaceParams.java
@@ -17,15 +17,9 @@
  */
 package org.apache.cassandra.schema;
 
-import java.util.HashMap;
 import java.util.Map;
 
 import com.google.common.base.Objects;
-import com.google.common.collect.ImmutableMap;
-
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.locator.*;
-import org.apache.cassandra.service.StorageService;
 
 /**
  * An immutable class representing keyspace parameters (durability and replication).
@@ -47,9 +41,9 @@ public final class KeyspaceParams
     }
 
     public final boolean durableWrites;
-    public final Replication replication;
+    public final ReplicationParams replication;
 
-    public KeyspaceParams(boolean durableWrites, Replication replication)
+    public KeyspaceParams(boolean durableWrites, ReplicationParams replication)
     {
         this.durableWrites = durableWrites;
         this.replication = replication;
@@ -57,22 +51,22 @@ public final class KeyspaceParams
 
     public static KeyspaceParams create(boolean durableWrites, Map<String, String> replication)
     {
-        return new KeyspaceParams(durableWrites, Replication.fromMap(replication));
+        return new KeyspaceParams(durableWrites, ReplicationParams.fromMap(replication));
     }
 
     public static KeyspaceParams local()
     {
-        return new KeyspaceParams(true, Replication.local());
+        return new KeyspaceParams(true, ReplicationParams.local());
     }
 
     public static KeyspaceParams simple(int replicationFactor)
     {
-        return new KeyspaceParams(true, Replication.simple(replicationFactor));
+        return new KeyspaceParams(true, ReplicationParams.simple(replicationFactor));
     }
 
     public static KeyspaceParams simpleTransient(int replicationFactor)
     {
-        return new KeyspaceParams(false, Replication.simple(replicationFactor));
+        return new KeyspaceParams(false, ReplicationParams.simple(replicationFactor));
     }
 
     public void validate(String name)
@@ -108,81 +102,4 @@ public final class KeyspaceParams
                       .add(Option.REPLICATION.toString(), replication)
                       .toString();
     }
-
-    public static final class Replication
-    {
-        public static String CLASS = "class";
-
-        public final Class<? extends AbstractReplicationStrategy> klass;
-        public final ImmutableMap<String, String> options;
-
-        private Replication(Class<? extends AbstractReplicationStrategy> klass, Map<String, String> options)
-        {
-            this.klass = klass;
-            this.options = ImmutableMap.copyOf(options);
-        }
-
-        private static Replication local()
-        {
-            return new Replication(LocalStrategy.class, ImmutableMap.of());
-        }
-
-        private static Replication simple(int replicationFactor)
-        {
-            return new Replication(SimpleStrategy.class, ImmutableMap.of("replication_factor", Integer.toString(replicationFactor)));
-        }
-
-        public void validate(String name)
-        {
-            // Attempt to instantiate the ARS, which will throw a ConfigurationException if the options aren't valid.
-            TokenMetadata tmd = StorageService.instance.getTokenMetadata();
-            IEndpointSnitch eps = DatabaseDescriptor.getEndpointSnitch();
-            AbstractReplicationStrategy.validateReplicationStrategy(name, klass, tmd, eps, options);
-        }
-
-        public static Replication fromMap(Map<String, String> map)
-        {
-            Map<String, String> options = new HashMap<>(map);
-            String className = options.remove(CLASS);
-            Class<? extends AbstractReplicationStrategy> klass = AbstractReplicationStrategy.getClass(className);
-            return new Replication(klass, options);
-        }
-
-        public Map<String, String> asMap()
-        {
-            Map<String, String> map = new HashMap<>(options);
-            map.put(CLASS, klass.getName());
-            return map;
-        }
-
-        @Override
-        public boolean equals(Object o)
-        {
-            if (this == o)
-                return true;
-
-            if (!(o instanceof Replication))
-                return false;
-
-            Replication r = (Replication) o;
-
-            return klass.equals(r.klass) && options.equals(r.options);
-        }
-
-        @Override
-        public int hashCode()
-        {
-            return Objects.hashCode(klass, options);
-        }
-
-        @Override
-        public String toString()
-        {
-            Objects.ToStringHelper helper = Objects.toStringHelper(this);
-            helper.add(CLASS, klass.getName());
-            for (Map.Entry<String, String> entry : options.entrySet())
-                helper.add(entry.getKey(), entry.getValue());
-            return helper.toString();
-        }
-    }
 }
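
For illustration (not part of the diff), the renamed ReplicationParams in use through KeyspaceParams, mirroring a CREATE KEYSPACE replication map; the replication factor here is arbitrary:

    import com.google.common.collect.ImmutableMap;

    import org.apache.cassandra.schema.KeyspaceParams;

    public class KeyspaceParamsSketch
    {
        public static void main(String[] args)
        {
            // Equivalent of: WITH replication = {'class' : 'SimpleStrategy', 'replication_factor' : '3'}
            //                AND durable_writes = true
            KeyspaceParams params = KeyspaceParams.create(true,
                                                          ImmutableMap.of("class", "SimpleStrategy",
                                                                          "replication_factor", "3"));
            assert params.durableWrites;
            System.out.println(params.replication);  // class and options, via ReplicationParams.toString()
        }
    }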

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/schema/LegacySchemaMigrator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/schema/LegacySchemaMigrator.java b/src/java/org/apache/cassandra/schema/LegacySchemaMigrator.java
index 41da481..8dac03b 100644
--- a/src/java/org/apache/cassandra/schema/LegacySchemaMigrator.java
+++ b/src/java/org/apache/cassandra/schema/LegacySchemaMigrator.java
@@ -26,18 +26,17 @@ import com.google.common.collect.ImmutableList;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.cache.CachingOptions;
 import org.apache.cassandra.config.*;
 import org.apache.cassandra.cql3.*;
 import org.apache.cassandra.cql3.functions.FunctionName;
 import org.apache.cassandra.cql3.functions.UDAggregate;
 import org.apache.cassandra.cql3.functions.UDFunction;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.compaction.AbstractCompactionStrategy;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.db.rows.RowIterator;
 import org.apache.cassandra.db.rows.UnfilteredRowIterators;
 import org.apache.cassandra.exceptions.InvalidRequestException;
-import org.apache.cassandra.io.compress.CompressionParameters;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.concurrent.OpOrder;
@@ -189,7 +188,7 @@ public final class LegacySchemaMigrator
 
         Map<String, String> replication = new HashMap<>();
         replication.putAll(fromJsonMap(row.getString("strategy_options")));
-        replication.put(KeyspaceParams.Replication.CLASS, row.getString("strategy_class"));
+        replication.put(ReplicationParams.CLASS, row.getString("strategy_class"));
 
         return KeyspaceParams.create(durableWrites, replication);
     }
@@ -317,41 +316,86 @@ public final class LegacySchemaMigrator
                                            columnDefs,
                                            DatabaseDescriptor.getPartitioner());
 
-        cfm.readRepairChance(tableRow.getDouble("read_repair_chance"));
-        cfm.dcLocalReadRepairChance(tableRow.getDouble("local_read_repair_chance"));
-        cfm.gcGraceSeconds(tableRow.getInt("gc_grace_seconds"));
-        cfm.minCompactionThreshold(tableRow.getInt("min_compaction_threshold"));
-        cfm.maxCompactionThreshold(tableRow.getInt("max_compaction_threshold"));
-        if (tableRow.has("comment"))
-            cfm.comment(tableRow.getString("comment"));
-        if (tableRow.has("memtable_flush_period_in_ms"))
-            cfm.memtableFlushPeriod(tableRow.getInt("memtable_flush_period_in_ms"));
-        cfm.caching(CachingOptions.fromString(tableRow.getString("caching")));
-        if (tableRow.has("default_time_to_live"))
-            cfm.defaultTimeToLive(tableRow.getInt("default_time_to_live"));
-        if (tableRow.has("speculative_retry"))
-            cfm.speculativeRetry(CFMetaData.SpeculativeRetry.fromString(tableRow.getString("speculative_retry")));
-        cfm.compactionStrategyClass(CFMetaData.createCompactionStrategy(tableRow.getString("compaction_strategy_class")));
-        cfm.compressionParameters(CompressionParameters.fromMap(fromJsonMap(tableRow.getString("compression_parameters"))));
-        cfm.compactionStrategyOptions(fromJsonMap(tableRow.getString("compaction_strategy_options")));
-
-        if (tableRow.has("min_index_interval"))
-            cfm.minIndexInterval(tableRow.getInt("min_index_interval"));
-
-        if (tableRow.has("max_index_interval"))
-            cfm.maxIndexInterval(tableRow.getInt("max_index_interval"));
-
-        if (tableRow.has("bloom_filter_fp_chance"))
-            cfm.bloomFilterFpChance(tableRow.getDouble("bloom_filter_fp_chance"));
-        else
-            cfm.bloomFilterFpChance(cfm.getBloomFilterFpChance());
-
         if (tableRow.has("dropped_columns"))
             addDroppedColumns(cfm, rawComparator, tableRow.getMap("dropped_columns", UTF8Type.instance, LongType.instance));
 
-        cfm.triggers(createTriggersFromTriggerRows(triggerRows));
+        return cfm.params(decodeTableParams(tableRow))
+                  .triggers(createTriggersFromTriggerRows(triggerRows));
+    }
+
+    private static TableParams decodeTableParams(UntypedResultSet.Row row)
+    {
+        TableParams.Builder params = TableParams.builder();
+
+        params.readRepairChance(row.getDouble("read_repair_chance"))
+              .dcLocalReadRepairChance(row.getDouble("local_read_repair_chance"))
+              .gcGraceSeconds(row.getInt("gc_grace_seconds"));
+
+        if (row.has("comment"))
+            params.comment(row.getString("comment"));
+
+        if (row.has("memtable_flush_period_in_ms"))
+            params.memtableFlushPeriodInMs(row.getInt("memtable_flush_period_in_ms"));
+
+        params.caching(CachingParams.fromMap(fromJsonMap(row.getString("caching"))));
+
+        if (row.has("default_time_to_live"))
+            params.defaultTimeToLive(row.getInt("default_time_to_live"));
+
+        if (row.has("speculative_retry"))
+            params.speculativeRetry(SpeculativeRetryParam.fromString(row.getString("speculative_retry")));
+
+        params.compression(CompressionParams.fromMap(fromJsonMap(row.getString("compression_parameters"))));
+
+        params.compaction(compactionFromRow(row));
+
+        if (row.has("min_index_interval"))
+            params.minIndexInterval(row.getInt("min_index_interval"));
+
+        if (row.has("max_index_interval"))
+            params.maxIndexInterval(row.getInt("max_index_interval"));
+
+        if (row.has("bloom_filter_fp_chance"))
+            params.bloomFilterFpChance(row.getDouble("bloom_filter_fp_chance"));
+
+        return params.build();
+    }
 
-        return cfm;
+    /*
+     * This method is needed to migrate max_compaction_threshold and min_compaction_threshold
+     * into the compaction map, where they belong.
+     *
+     * We must use reflection to validate the options because not every compaction strategy supports
+     * the threshold params (LCS doesn't, STCS and DTCS do).
+     */
+    @SuppressWarnings("unchecked")
+    private static CompactionParams compactionFromRow(UntypedResultSet.Row row)
+    {
+        Class<? extends AbstractCompactionStrategy> klass =
+            CFMetaData.createCompactionStrategy(row.getString("compaction_strategy_class"));
+        Map<String, String> options = fromJsonMap(row.getString("compaction_strategy_options"));
+
+        int minThreshold = row.getInt("min_compaction_threshold");
+        int maxThreshold = row.getInt("max_compaction_threshold");
+
+        Map<String, String> optionsWithThresholds = new HashMap<>(options);
+        optionsWithThresholds.putIfAbsent(CompactionParams.Option.MIN_THRESHOLD.toString(), Integer.toString(minThreshold));
+        optionsWithThresholds.putIfAbsent(CompactionParams.Option.MAX_THRESHOLD.toString(), Integer.toString(maxThreshold));
+
+        try
+        {
+            Map<String, String> unrecognizedOptions =
+                (Map<String, String>) klass.getMethod("validateOptions", Map.class).invoke(null, optionsWithThresholds);
+
+            if (unrecognizedOptions.isEmpty())
+                options = optionsWithThresholds;
+        }
+        catch (Exception e)
+        {
+            throw new RuntimeException(e);
+        }
+
+        return CompactionParams.create(klass, options);
     }
 
     // Should only be called on compact tables
@@ -627,10 +671,7 @@ public final class LegacySchemaMigrator
                               SystemKeyspace.NAME,
                               SystemKeyspace.LEGACY_FUNCTIONS);
         HashMultimap<String, List<String>> functionSignatures = HashMultimap.create();
-        query(query, keyspaceName).forEach(row ->
-        {
-            functionSignatures.put(row.getString("function_name"), row.getList("signature", UTF8Type.instance));
-        });
+        query(query, keyspaceName).forEach(row -> functionSignatures.put(row.getString("function_name"), row.getList("signature", UTF8Type.instance)));
 
         Collection<Function> functions = new ArrayList<>();
         functionSignatures.entries().forEach(pair -> functions.add(readFunction(keyspaceName, pair.getKey(), pair.getValue())));
@@ -699,10 +740,7 @@ public final class LegacySchemaMigrator
                               SystemKeyspace.NAME,
                               SystemKeyspace.LEGACY_AGGREGATES);
         HashMultimap<String, List<String>> aggregateSignatures = HashMultimap.create();
-        query(query, keyspaceName).forEach(row ->
-        {
-            aggregateSignatures.put(row.getString("aggregate_name"), row.getList("signature", UTF8Type.instance));
-        });
+        query(query, keyspaceName).forEach(row -> aggregateSignatures.put(row.getString("aggregate_name"), row.getList("signature", UTF8Type.instance)));
 
         Collection<Aggregate> aggregates = new ArrayList<>();
         aggregateSignatures.entries().forEach(pair -> aggregates.add(readAggregate(keyspaceName, pair.getKey(), pair.getValue())));
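
For reference, a standalone sketch of the reflection-based threshold migration that compactionFromRow() above performs. FakeStrategy and ThresholdMigrationSketch are hypothetical stand-ins, not part of this patch; real strategies expose a static validateOptions(Map) that returns whatever options they did not recognize, which is what the migrator relies on.

    import java.util.HashMap;
    import java.util.Map;

    public final class ThresholdMigrationSketch
    {
        // Hypothetical strategy: recognizes only the two threshold options.
        public static final class FakeStrategy
        {
            public static Map<String, String> validateOptions(Map<String, String> options)
            {
                Map<String, String> unrecognized = new HashMap<>(options);
                unrecognized.remove("min_threshold");
                unrecognized.remove("max_threshold");
                return unrecognized;
            }
        }

        @SuppressWarnings("unchecked")
        static Map<String, String> migrateThresholds(Class<?> strategy, Map<String, String> options, int min, int max) throws Exception
        {
            Map<String, String> withThresholds = new HashMap<>(options);
            withThresholds.putIfAbsent("min_threshold", Integer.toString(min));
            withThresholds.putIfAbsent("max_threshold", Integer.toString(max));

            Map<String, String> unrecognized =
                (Map<String, String>) strategy.getMethod("validateOptions", Map.class).invoke(null, withThresholds);

            // Keep the thresholds only when the strategy accepts them (LCS, for example, would not).
            return unrecognized.isEmpty() ? withThresholds : options;
        }

        public static void main(String[] args) throws Exception
        {
            // Prints a map containing min_threshold=4 and max_threshold=32.
            System.out.println(migrateThresholds(FakeStrategy.class, new HashMap<>(), 4, 32));
        }
    }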

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/schema/ReplicationParams.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/schema/ReplicationParams.java b/src/java/org/apache/cassandra/schema/ReplicationParams.java
new file mode 100644
index 0000000..cdeb4c2
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/ReplicationParams.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.schema;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import com.google.common.base.MoreObjects;
+import com.google.common.base.Objects;
+import com.google.common.collect.ImmutableMap;
+
+import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.locator.*;
+import org.apache.cassandra.service.StorageService;
+
+public final class ReplicationParams
+{
+    public static final String CLASS = "class";
+
+    public final Class<? extends AbstractReplicationStrategy> klass;
+    public final ImmutableMap<String, String> options;
+
+    private ReplicationParams(Class<? extends AbstractReplicationStrategy> klass, Map<String, String> options)
+    {
+        this.klass = klass;
+        this.options = ImmutableMap.copyOf(options);
+    }
+
+    static ReplicationParams local()
+    {
+        return new ReplicationParams(LocalStrategy.class, ImmutableMap.of());
+    }
+
+    static ReplicationParams simple(int replicationFactor)
+    {
+        return new ReplicationParams(SimpleStrategy.class, ImmutableMap.of("replication_factor", Integer.toString(replicationFactor)));
+    }
+
+    public void validate(String name)
+    {
+        // Attempt to instantiate the ARS, which will throw a ConfigurationException if the options aren't valid.
+        TokenMetadata tmd = StorageService.instance.getTokenMetadata();
+        IEndpointSnitch eps = DatabaseDescriptor.getEndpointSnitch();
+        AbstractReplicationStrategy.validateReplicationStrategy(name, klass, tmd, eps, options);
+    }
+
+    public static ReplicationParams fromMap(Map<String, String> map)
+    {
+        Map<String, String> options = new HashMap<>(map);
+        String className = options.remove(CLASS);
+        Class<? extends AbstractReplicationStrategy> klass = AbstractReplicationStrategy.getClass(className);
+        return new ReplicationParams(klass, options);
+    }
+
+    public Map<String, String> asMap()
+    {
+        Map<String, String> map = new HashMap<>(options);
+        map.put(CLASS, klass.getName());
+        return map;
+    }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o)
+            return true;
+
+        if (!(o instanceof ReplicationParams))
+            return false;
+
+        ReplicationParams r = (ReplicationParams) o;
+
+        return klass.equals(r.klass) && options.equals(r.options);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hashCode(klass, options);
+    }
+
+    @Override
+    public String toString()
+    {
+        MoreObjects.ToStringHelper helper = MoreObjects.toStringHelper(this);
+        helper.add(CLASS, klass.getName());
+        for (Map.Entry<String, String> entry : options.entrySet())
+            helper.add(entry.getKey(), entry.getValue());
+        return helper.toString();
+    }
+}
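
A usage sketch for the new ReplicationParams map round-trip, assuming the Cassandra classes are on the classpath; ReplicationParamsSketch and the option values are illustrative only.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.cassandra.schema.ReplicationParams;

    public final class ReplicationParamsSketch
    {
        public static void main(String[] args)
        {
            Map<String, String> map = new HashMap<>();
            map.put("class", "org.apache.cassandra.locator.SimpleStrategy");
            map.put("replication_factor", "3");

            // fromMap() strips the "class" entry and resolves the strategy class;
            // asMap() puts it back, so the schema representation round-trips.
            ReplicationParams params = ReplicationParams.fromMap(map);
            System.out.println(params.asMap().equals(map)); // true
        }
    }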

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/schema/SchemaKeyspace.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/schema/SchemaKeyspace.java b/src/java/org/apache/cassandra/schema/SchemaKeyspace.java
index ba6a2e1..5791db7 100644
--- a/src/java/org/apache/cassandra/schema/SchemaKeyspace.java
+++ b/src/java/org/apache/cassandra/schema/SchemaKeyspace.java
@@ -31,21 +31,18 @@ import com.google.common.collect.Maps;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.cache.CachingOptions;
 import org.apache.cassandra.config.*;
 import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.cql3.functions.*;
-import org.apache.cassandra.cql3.statements.CFPropDefs;
+import org.apache.cassandra.db.ClusteringComparator;
 import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.compaction.AbstractCompactionStrategy;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.db.partitions.*;
 import org.apache.cassandra.db.rows.*;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.InvalidRequestException;
-import org.apache.cassandra.io.compress.CompressionParameters;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.concurrent.OpOrder;
@@ -819,20 +816,9 @@ public final class SchemaKeyspace
     {
         RowUpdateBuilder adder = new RowUpdateBuilder(Tables, timestamp, mutation).clustering(table.cfName);
 
-        adder.add("bloom_filter_fp_chance", table.getBloomFilterFpChance())
-             .add("comment", table.getComment())
-             .add("dclocal_read_repair_chance", table.getDcLocalReadRepairChance())
-             .add("default_time_to_live", table.getDefaultTimeToLive())
-             .add("gc_grace_seconds", table.getGcGraceSeconds())
-             .add("id", table.cfId)
-             .add("max_index_interval", table.getMaxIndexInterval())
-             .add("memtable_flush_period_in_ms", table.getMemtableFlushPeriod())
-             .add("min_index_interval", table.getMinIndexInterval())
-             .add("read_repair_chance", table.getReadRepairChance())
-             .add("speculative_retry", table.getSpeculativeRetry().toString())
-             .map("caching", table.getCaching().asMap())
-             .map("compaction", buildCompactionMap(table))
-             .map("compression", table.compressionParameters().asMap())
+        addTableParamsToSchemaMutation(table.params, adder);
+
+        adder.add("id", table.cfId)
              .set("flags", CFMetaData.flagsToStrings(table.flags()))
              .build();
 
@@ -852,38 +838,21 @@ public final class SchemaKeyspace
         }
     }
 
-    /*
-     * The method is needed - temporarily - to migrate max_compaction_threshold and min_compaction_threshold
-     * to the compaction map, where they belong.
-     *
-     * We must use reflection to validate the options because not every compaction strategy respects and supports
-     * the threshold params (LCS doesn't, STCS and DTCS don't).
-     */
-    @SuppressWarnings("unchecked")
-    private static Map<String, String> buildCompactionMap(CFMetaData cfm)
+    private static void addTableParamsToSchemaMutation(TableParams params, RowUpdateBuilder adder)
     {
-        Map<String, String> options = new HashMap<>(cfm.compactionStrategyOptions);
-
-        Map<String, String> optionsWithThresholds = new HashMap<>(options);
-        options.putIfAbsent(CFPropDefs.KW_MINCOMPACTIONTHRESHOLD, Integer.toString(cfm.getMinCompactionThreshold()));
-        options.putIfAbsent(CFPropDefs.KW_MAXCOMPACTIONTHRESHOLD, Integer.toString(cfm.getMaxCompactionThreshold()));
-
-        try
-        {
-            Map<String, String> unrecognizedOptions = (Map<String, String>) cfm.compactionStrategyClass
-                                                                               .getMethod("validateOptions", Map.class)
-                                                                               .invoke(null, optionsWithThresholds);
-            if (unrecognizedOptions.isEmpty())
-                options = optionsWithThresholds;
-        }
-        catch (Exception e)
-        {
-            throw new RuntimeException(e);
-        }
-
-        options.put("class", cfm.compactionStrategyClass.getName());
-
-        return options;
+        adder.add("bloom_filter_fp_chance", params.bloomFilterFpChance)
+             .add("comment", params.comment)
+             .add("dclocal_read_repair_chance", params.dcLocalReadRepairChance)
+             .add("default_time_to_live", params.defaultTimeToLive)
+             .add("gc_grace_seconds", params.gcGraceSeconds)
+             .add("max_index_interval", params.maxIndexInterval)
+             .add("memtable_flush_period_in_ms", params.memtableFlushPeriodInMs)
+             .add("min_index_interval", params.minIndexInterval)
+             .add("read_repair_chance", params.readRepairChance)
+             .add("speculative_retry", params.speculativeRetry.toString())
+             .map("caching", params.caching.asMap())
+             .map("compaction", params.compaction.asMap())
+             .map("compression", params.compression.asMap());
     }
 
     public static Mutation makeUpdateTableMutation(KeyspaceMetadata keyspace,
@@ -1085,49 +1054,38 @@ public final class SchemaKeyspace
         boolean isCounter = flags.contains(CFMetaData.Flag.COUNTER);
         boolean isDense = flags.contains(CFMetaData.Flag.DENSE);
         boolean isCompound = flags.contains(CFMetaData.Flag.COMPOUND);
-        boolean isMaterializedView = flags.contains(CFMetaData.Flag.MATERIALIZEDVIEW);
-
-        CFMetaData cfm = CFMetaData.create(keyspace,
-                                           table,
-                                           id,
-                                           isDense,
-                                           isCompound,
-                                           isSuper,
-                                           isCounter,
-                                           isMaterializedView,
-                                           columns,
-                                           DatabaseDescriptor.getPartitioner());
-
-        Map<String, String> compaction = new HashMap<>(row.getTextMap("compaction"));
-        Class<? extends AbstractCompactionStrategy> compactionStrategyClass =
-            CFMetaData.createCompactionStrategy(compaction.remove("class"));
-
-        int minCompactionThreshold = compaction.containsKey(CFPropDefs.KW_MINCOMPACTIONTHRESHOLD)
-                                   ? Integer.parseInt(compaction.get(CFPropDefs.KW_MINCOMPACTIONTHRESHOLD))
-                                   : CFMetaData.DEFAULT_MIN_COMPACTION_THRESHOLD;
-
-        int maxCompactionThreshold = compaction.containsKey(CFPropDefs.KW_MAXCOMPACTIONTHRESHOLD)
-                                   ? Integer.parseInt(compaction.get(CFPropDefs.KW_MAXCOMPACTIONTHRESHOLD))
-                                   : CFMetaData.DEFAULT_MAX_COMPACTION_THRESHOLD;
-
-        cfm.bloomFilterFpChance(row.getDouble("bloom_filter_fp_chance"))
-           .caching(CachingOptions.fromMap(row.getTextMap("caching")))
-           .comment(row.getString("comment"))
-           .compactionStrategyClass(compactionStrategyClass)
-           .compactionStrategyOptions(compaction)
-           .compressionParameters(CompressionParameters.fromMap(row.getTextMap("compression")))
-           .dcLocalReadRepairChance(row.getDouble("dclocal_read_repair_chance"))
-           .defaultTimeToLive(row.getInt("default_time_to_live"))
-           .gcGraceSeconds(row.getInt("gc_grace_seconds"))
-           .maxCompactionThreshold(maxCompactionThreshold)
-           .maxIndexInterval(row.getInt("max_index_interval"))
-           .memtableFlushPeriod(row.getInt("memtable_flush_period_in_ms"))
-           .minCompactionThreshold(minCompactionThreshold)
-           .minIndexInterval(row.getInt("min_index_interval"))
-           .readRepairChance(row.getDouble("read_repair_chance"))
-           .speculativeRetry(CFMetaData.SpeculativeRetry.fromString(row.getString("speculative_retry")));
-
-        return cfm;
+        boolean isMaterializedView = flags.contains(CFMetaData.Flag.VIEW);
+
+        return CFMetaData.create(keyspace,
+                                 table,
+                                 id,
+                                 isDense,
+                                 isCompound,
+                                 isSuper,
+                                 isCounter,
+                                 isMaterializedView,
+                                 columns,
+                                 DatabaseDescriptor.getPartitioner())
+                         .params(createTableParamsFromRow(row));
+    }
+
+    private static TableParams createTableParamsFromRow(UntypedResultSet.Row row)
+    {
+        return TableParams.builder()
+                          .bloomFilterFpChance(row.getDouble("bloom_filter_fp_chance"))
+                          .caching(CachingParams.fromMap(row.getTextMap("caching")))
+                          .comment(row.getString("comment"))
+                          .compaction(CompactionParams.fromMap(row.getTextMap("compaction")))
+                          .compression(CompressionParams.fromMap(row.getTextMap("compression")))
+                          .dcLocalReadRepairChance(row.getDouble("dclocal_read_repair_chance"))
+                          .defaultTimeToLive(row.getInt("default_time_to_live"))
+                          .gcGraceSeconds(row.getInt("gc_grace_seconds"))
+                          .maxIndexInterval(row.getInt("max_index_interval"))
+                          .memtableFlushPeriodInMs(row.getInt("memtable_flush_period_in_ms"))
+                          .minIndexInterval(row.getInt("min_index_interval"))
+                          .readRepairChance(row.getDouble("read_repair_chance"))
+                          .speculativeRetry(SpeculativeRetryParam.fromString(row.getString("speculative_retry")))
+                          .build();
     }
 
     /*

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/schema/SpeculativeRetryParam.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/schema/SpeculativeRetryParam.java b/src/java/org/apache/cassandra/schema/SpeculativeRetryParam.java
new file mode 100644
index 0000000..58c6375
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/SpeculativeRetryParam.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.schema;
+
+import java.text.DecimalFormat;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.base.Objects;
+
+import org.apache.cassandra.exceptions.ConfigurationException;
+
+import static java.lang.String.format;
+
+public final class SpeculativeRetryParam
+{
+    public enum Kind
+    {
+        NONE, CUSTOM, PERCENTILE, ALWAYS
+    }
+
+    public static final SpeculativeRetryParam NONE = none();
+    public static final SpeculativeRetryParam ALWAYS = always();
+    public static final SpeculativeRetryParam DEFAULT = percentile(99);
+
+    private final Kind kind;
+    private final double value;
+
+    // pre-processed value: divided by 100 for PERCENTILE, converted from millis to nanos for CUSTOM
+    private final double threshold;
+
+    private SpeculativeRetryParam(Kind kind, double value)
+    {
+        this.kind = kind;
+        this.value = value;
+
+        if (kind == Kind.PERCENTILE)
+            threshold = value / 100;
+        else if (kind == Kind.CUSTOM)
+            threshold = TimeUnit.MILLISECONDS.toNanos((long) value);
+        else
+            threshold = value;
+    }
+
+    public Kind kind()
+    {
+        return kind;
+    }
+
+    public double threshold()
+    {
+        return threshold;
+    }
+
+    public static SpeculativeRetryParam none()
+    {
+        return new SpeculativeRetryParam(Kind.NONE, 0);
+    }
+
+    public static SpeculativeRetryParam always()
+    {
+        return new SpeculativeRetryParam(Kind.ALWAYS, 0);
+    }
+
+    public static SpeculativeRetryParam custom(double value)
+    {
+        return new SpeculativeRetryParam(Kind.CUSTOM, value);
+    }
+
+    public static SpeculativeRetryParam percentile(double value)
+    {
+        return new SpeculativeRetryParam(Kind.PERCENTILE, value);
+    }
+
+    public static SpeculativeRetryParam fromString(String value)
+    {
+        if (value.toLowerCase().endsWith("ms"))
+        {
+            try
+            {
+                return custom(Double.parseDouble(value.substring(0, value.length() - "ms".length())));
+            }
+            catch (IllegalArgumentException e)
+            {
+                throw new ConfigurationException(format("Invalid value %s for option '%s'", value, TableParams.Option.SPECULATIVE_RETRY));
+            }
+        }
+
+        if (value.toUpperCase().endsWith(Kind.PERCENTILE.toString()))
+        {
+            double threshold;
+            try
+            {
+                threshold = Double.parseDouble(value.substring(0, value.length() - Kind.PERCENTILE.toString().length()));
+            }
+            catch (IllegalArgumentException e)
+            {
+                throw new ConfigurationException(format("Invalid value %s for option '%s'", value, TableParams.Option.SPECULATIVE_RETRY));
+            }
+
+            if (threshold >= 0.0 && threshold <= 100.0)
+                return percentile(threshold);
+
+            throw new ConfigurationException(format("Invalid value %s for PERCENTILE option '%s': must be between 0.0 and 100.0",
+                                                    value,
+                                                    TableParams.Option.SPECULATIVE_RETRY));
+        }
+
+        if (value.equals(Kind.NONE.toString()))
+            return NONE;
+
+        if (value.equals(Kind.ALWAYS.toString()))
+            return ALWAYS;
+
+        throw new ConfigurationException(format("Invalid value %s for option '%s'", value, TableParams.Option.SPECULATIVE_RETRY));
+    }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (!(o instanceof SpeculativeRetryParam))
+            return false;
+        SpeculativeRetryParam srp = (SpeculativeRetryParam) o;
+        return kind == srp.kind && threshold == srp.threshold;
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hashCode(kind, threshold);
+    }
+
+    @Override
+    public String toString()
+    {
+        switch (kind)
+        {
+            case CUSTOM:
+                return format("%sms", value);
+            case PERCENTILE:
+                return format("%sPERCENTILE", new DecimalFormat("#.#####").format(value));
+            default: // NONE and ALWAYS
+                return kind.toString();
+        }
+    }
+}
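
A small sketch of how the strings stored in the schema map onto the SpeculativeRetryParam values above; fromString() accepts the same forms that toString() produces. SpeculativeRetrySketch is illustrative only.

    import org.apache.cassandra.schema.SpeculativeRetryParam;

    public final class SpeculativeRetrySketch
    {
        public static void main(String[] args)
        {
            // "NONE" and "ALWAYS" resolve to the shared constants.
            System.out.println(SpeculativeRetryParam.fromString("NONE") == SpeculativeRetryParam.NONE);     // true
            System.out.println(SpeculativeRetryParam.fromString("ALWAYS") == SpeculativeRetryParam.ALWAYS); // true

            // Percentiles are stored internally as a fraction (99 -> 0.99) ...
            System.out.println(SpeculativeRetryParam.fromString("99PERCENTILE").threshold()); // 0.99

            // ... while custom values are converted from milliseconds to nanoseconds.
            System.out.println(SpeculativeRetryParam.fromString("50ms").threshold()); // 5.0E7
        }
    }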

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/schema/TableParams.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/schema/TableParams.java b/src/java/org/apache/cassandra/schema/TableParams.java
new file mode 100644
index 0000000..3b3a88e
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/TableParams.java
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.schema;
+
+import com.google.common.base.MoreObjects;
+import com.google.common.base.Objects;
+
+import org.apache.cassandra.exceptions.ConfigurationException;
+
+import static java.lang.String.format;
+
+public final class TableParams
+{
+    public static final TableParams DEFAULT = TableParams.builder().build();
+
+    public enum Option
+    {
+        BLOOM_FILTER_FP_CHANCE,
+        CACHING,
+        COMMENT,
+        COMPACTION,
+        COMPRESSION,
+        DCLOCAL_READ_REPAIR_CHANCE,
+        DEFAULT_TIME_TO_LIVE,
+        GC_GRACE_SECONDS,
+        MAX_INDEX_INTERVAL,
+        MEMTABLE_FLUSH_PERIOD_IN_MS,
+        MIN_INDEX_INTERVAL,
+        READ_REPAIR_CHANCE,
+        SPECULATIVE_RETRY;
+
+        @Override
+        public String toString()
+        {
+            return name().toLowerCase();
+        }
+    }
+
+    public static final String DEFAULT_COMMENT = "";
+    public static final double DEFAULT_READ_REPAIR_CHANCE = 0.0;
+    public static final double DEFAULT_DCLOCAL_READ_REPAIR_CHANCE = 0.1;
+    public static final int DEFAULT_GC_GRACE_SECONDS = 864000; // 10 days
+    public static final int DEFAULT_DEFAULT_TIME_TO_LIVE = 0;
+    public static final int DEFAULT_MEMTABLE_FLUSH_PERIOD_IN_MS = 0;
+    public static final int DEFAULT_MIN_INDEX_INTERVAL = 128;
+    public static final int DEFAULT_MAX_INDEX_INTERVAL = 2048;
+
+    public final String comment;
+    public final double readRepairChance;
+    public final double dcLocalReadRepairChance;
+    public final double bloomFilterFpChance;
+    public final int gcGraceSeconds;
+    public final int defaultTimeToLive;
+    public final int memtableFlushPeriodInMs;
+    public final int minIndexInterval;
+    public final int maxIndexInterval;
+    public final SpeculativeRetryParam speculativeRetry;
+    public final CachingParams caching;
+    public final CompactionParams compaction;
+    public final CompressionParams compression;
+
+    private TableParams(Builder builder)
+    {
+        comment = builder.comment;
+        readRepairChance = builder.readRepairChance;
+        dcLocalReadRepairChance = builder.dcLocalReadRepairChance;
+        bloomFilterFpChance = builder.bloomFilterFpChance == null
+                            ? builder.compaction.defaultBloomFilterFbChance()
+                            : builder.bloomFilterFpChance;
+        gcGraceSeconds = builder.gcGraceSeconds;
+        defaultTimeToLive = builder.defaultTimeToLive;
+        memtableFlushPeriodInMs = builder.memtableFlushPeriodInMs;
+        minIndexInterval = builder.minIndexInterval;
+        maxIndexInterval = builder.maxIndexInterval;
+        speculativeRetry = builder.speculativeRetry;
+        caching = builder.caching;
+        compaction = builder.compaction;
+        compression = builder.compression;
+    }
+
+    public static Builder builder()
+    {
+        return new Builder();
+    }
+
+    public static Builder builder(TableParams params)
+    {
+        return new Builder().bloomFilterFpChance(params.bloomFilterFpChance)
+                            .caching(params.caching)
+                            .comment(params.comment)
+                            .compaction(params.compaction)
+                            .compression(params.compression)
+                            .dcLocalReadRepairChance(params.dcLocalReadRepairChance)
+                            .defaultTimeToLive(params.defaultTimeToLive)
+                            .gcGraceSeconds(params.gcGraceSeconds)
+                            .maxIndexInterval(params.maxIndexInterval)
+                            .memtableFlushPeriodInMs(params.memtableFlushPeriodInMs)
+                            .minIndexInterval(params.minIndexInterval)
+                            .readRepairChance(params.readRepairChance)
+                            .speculativeRetry(params.speculativeRetry);
+    }
+
+    public void validate()
+    {
+        compaction.validate();
+        compression.validate();
+
+        if (bloomFilterFpChance <= 0 || bloomFilterFpChance > 1)
+        {
+            fail("%s must be larger than 0.0 and less than or equal to 1.0 (got %s)",
+                 Option.BLOOM_FILTER_FP_CHANCE,
+                 bloomFilterFpChance);
+        }
+
+        if (dcLocalReadRepairChance < 0 || dcLocalReadRepairChance > 1.0)
+        {
+            fail("%s must be larger than or equal to 0 and smaller than or equal to 1.0 (got %s)",
+                 Option.DCLOCAL_READ_REPAIR_CHANCE,
+                 dcLocalReadRepairChance);
+        }
+
+        if (readRepairChance < 0 || readRepairChance > 1.0)
+        {
+            fail("%s must be larger than or equal to 0 and smaller than or equal to 1.0 (got %s)",
+                 Option.READ_REPAIR_CHANCE,
+                 readRepairChance);
+        }
+
+        if (defaultTimeToLive < 0)
+            fail("%s must be greater than or equal to 0 (got %s)", Option.DEFAULT_TIME_TO_LIVE, defaultTimeToLive);
+
+        if (gcGraceSeconds < 0)
+            fail("%s must be greater than or equal to 0 (got %s)", Option.GC_GRACE_SECONDS, gcGraceSeconds);
+
+        if (minIndexInterval < 1)
+            fail("%s must be greater than or equal to 1 (got %s)", Option.MIN_INDEX_INTERVAL, minIndexInterval);
+
+        if (maxIndexInterval < minIndexInterval)
+        {
+            fail("%s must be greater than or equal to %s (%s) (got %s)",
+                 Option.MAX_INDEX_INTERVAL,
+                 Option.MIN_INDEX_INTERVAL,
+                 minIndexInterval,
+                 maxIndexInterval);
+        }
+
+        if (memtableFlushPeriodInMs < 0)
+            fail("%s must be greater than or equal to 0 (got %s)", Option.MEMTABLE_FLUSH_PERIOD_IN_MS, memtableFlushPeriodInMs);
+    }
+
+    private static void fail(String format, Object... args)
+    {
+        throw new ConfigurationException(format(format, args));
+    }
+
+    @Override
+    public boolean equals(Object o)
+    {
+        if (this == o)
+            return true;
+
+        if (!(o instanceof TableParams))
+            return false;
+
+        TableParams p = (TableParams) o;
+
+        return comment.equals(p.comment)
+            && readRepairChance == p.readRepairChance
+            && dcLocalReadRepairChance == p.dcLocalReadRepairChance
+            && bloomFilterFpChance == p.bloomFilterFpChance
+            && gcGraceSeconds == p.gcGraceSeconds
+            && defaultTimeToLive == p.defaultTimeToLive
+            && memtableFlushPeriodInMs == p.memtableFlushPeriodInMs
+            && minIndexInterval == p.minIndexInterval
+            && maxIndexInterval == p.maxIndexInterval
+            && speculativeRetry.equals(p.speculativeRetry)
+            && caching.equals(p.caching)
+            && compaction.equals(p.compaction)
+            && compression.equals(p.compression);
+    }
+
+    @Override
+    public int hashCode()
+    {
+        return Objects.hashCode(comment,
+                                readRepairChance,
+                                dcLocalReadRepairChance,
+                                bloomFilterFpChance,
+                                gcGraceSeconds,
+                                defaultTimeToLive,
+                                memtableFlushPeriodInMs,
+                                minIndexInterval,
+                                maxIndexInterval,
+                                speculativeRetry,
+                                caching,
+                                compaction,
+                                compression);
+    }
+
+    @Override
+    public String toString()
+    {
+        return MoreObjects.toStringHelper(this)
+                          .add(Option.COMMENT.toString(), comment)
+                          .add(Option.READ_REPAIR_CHANCE.toString(), readRepairChance)
+                          .add(Option.DCLOCAL_READ_REPAIR_CHANCE.toString(), dcLocalReadRepairChance)
+                          .add(Option.BLOOM_FILTER_FP_CHANCE.toString(), bloomFilterFpChance)
+                          .add(Option.GC_GRACE_SECONDS.toString(), gcGraceSeconds)
+                          .add(Option.DEFAULT_TIME_TO_LIVE.toString(), defaultTimeToLive)
+                          .add(Option.MEMTABLE_FLUSH_PERIOD_IN_MS.toString(), memtableFlushPeriodInMs)
+                          .add(Option.MIN_INDEX_INTERVAL.toString(), minIndexInterval)
+                          .add(Option.MAX_INDEX_INTERVAL.toString(), maxIndexInterval)
+                          .add(Option.SPECULATIVE_RETRY.toString(), speculativeRetry)
+                          .add(Option.CACHING.toString(), caching)
+                          .add(Option.COMPACTION.toString(), compaction)
+                          .add(Option.COMPRESSION.toString(), compression)
+                          .toString();
+    }
+
+    public static final class Builder
+    {
+        private String comment = DEFAULT_COMMENT;
+        private double readRepairChance = DEFAULT_READ_REPAIR_CHANCE;
+        private double dcLocalReadRepairChance = DEFAULT_DCLOCAL_READ_REPAIR_CHANCE;
+        private Double bloomFilterFpChance;
+        private int gcGraceSeconds = DEFAULT_GC_GRACE_SECONDS;
+        private int defaultTimeToLive = DEFAULT_DEFAULT_TIME_TO_LIVE;
+        private int memtableFlushPeriodInMs = DEFAULT_MEMTABLE_FLUSH_PERIOD_IN_MS;
+        private int minIndexInterval = DEFAULT_MIN_INDEX_INTERVAL;
+        private int maxIndexInterval = DEFAULT_MAX_INDEX_INTERVAL;
+        private SpeculativeRetryParam speculativeRetry = SpeculativeRetryParam.DEFAULT;
+        private CachingParams caching = CachingParams.DEFAULT;
+        private CompactionParams compaction = CompactionParams.DEFAULT;
+        private CompressionParams compression = CompressionParams.DEFAULT;
+
+        public Builder()
+        {
+        }
+
+        public TableParams build()
+        {
+            return new TableParams(this);
+        }
+
+        public Builder comment(String val)
+        {
+            comment = val;
+            return this;
+        }
+
+        public Builder readRepairChance(double val)
+        {
+            readRepairChance = val;
+            return this;
+        }
+
+        public Builder dcLocalReadRepairChance(double val)
+        {
+            dcLocalReadRepairChance = val;
+            return this;
+        }
+
+        public Builder bloomFilterFpChance(double val)
+        {
+            bloomFilterFpChance = val;
+            return this;
+        }
+
+        public Builder gcGraceSeconds(int val)
+        {
+            gcGraceSeconds = val;
+            return this;
+        }
+
+        public Builder defaultTimeToLive(int val)
+        {
+            defaultTimeToLive = val;
+            return this;
+        }
+
+        public Builder memtableFlushPeriodInMs(int val)
+        {
+            memtableFlushPeriodInMs = val;
+            return this;
+        }
+
+        public Builder minIndexInterval(int val)
+        {
+            minIndexInterval = val;
+            return this;
+        }
+
+        public Builder maxIndexInterval(int val)
+        {
+            maxIndexInterval = val;
+            return this;
+        }
+
+        public Builder speculativeRetry(SpeculativeRetryParam val)
+        {
+            speculativeRetry = val;
+            return this;
+        }
+
+        public Builder caching(CachingParams val)
+        {
+            caching = val;
+            return this;
+        }
+
+        public Builder compaction(CompactionParams val)
+        {
+            compaction = val;
+            return this;
+        }
+
+        public Builder compression(CompressionParams val)
+        {
+            compression = val;
+            return this;
+        }
+    }
+}
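
A builder usage sketch for the new TableParams, assuming the class above plus the CachingParams/CompactionParams/CompressionParams defaults introduced elsewhere in this patch; TableParamsSketch and the values are illustrative only.

    import org.apache.cassandra.schema.TableParams;

    public final class TableParamsSketch
    {
        public static void main(String[] args)
        {
            // Options left unset fall back to the DEFAULT_* constants declared above.
            TableParams params = TableParams.builder()
                                            .comment("users by id")
                                            .gcGraceSeconds(86400)
                                            .build();

            params.validate(); // throws ConfigurationException on out-of-range values

            // builder(params) copies every field, so a single option can be overridden
            // without touching the rest - handy when applying an ALTER TABLE.
            TableParams altered = TableParams.builder(params)
                                             .defaultTimeToLive(3600)
                                             .build();

            System.out.println(altered);
        }
    }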


[2/5] cassandra git commit: Factor out TableParams from CFMetaData

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/service/AbstractReadExecutor.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/AbstractReadExecutor.java b/src/java/org/apache/cassandra/service/AbstractReadExecutor.java
index 9a57f45..487a14c 100644
--- a/src/java/org/apache/cassandra/service/AbstractReadExecutor.java
+++ b/src/java/org/apache/cassandra/service/AbstractReadExecutor.java
@@ -28,7 +28,6 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.concurrent.Stage;
 import org.apache.cassandra.concurrent.StageManager;
-import org.apache.cassandra.config.CFMetaData.SpeculativeRetry.RetryType;
 import org.apache.cassandra.config.ReadRepairDecision;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.ConsistencyLevel;
@@ -42,10 +41,10 @@ import org.apache.cassandra.exceptions.UnavailableException;
 import org.apache.cassandra.metrics.ReadRepairMetrics;
 import org.apache.cassandra.net.MessageOut;
 import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.schema.SpeculativeRetryParam;
 import org.apache.cassandra.service.StorageProxy.LocalReadRunnable;
 import org.apache.cassandra.tracing.TraceState;
 import org.apache.cassandra.tracing.Tracing;
-import org.apache.cassandra.utils.FBUtilities;
 
 /**
  * Sends a read request to the replicas needed to satisfy a given ConsistencyLevel.
@@ -159,10 +158,10 @@ public abstract class AbstractReadExecutor
         }
 
         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(command.metadata().cfId);
-        RetryType retryType = cfs.metadata.getSpeculativeRetry().type;
+        SpeculativeRetryParam retry = cfs.metadata.params.speculativeRetry;
 
         // Speculative retry is disabled *OR* there are simply no extra replicas to speculate.
-        if (retryType == RetryType.NONE || consistencyLevel.blockFor(keyspace) == allReplicas.size())
+        if (retry.equals(SpeculativeRetryParam.NONE) || consistencyLevel.blockFor(keyspace) == allReplicas.size())
             return new NeverSpeculatingReadExecutor(keyspace, command, consistencyLevel, targetReplicas);
 
         if (targetReplicas.size() == allReplicas.size())
@@ -190,7 +189,7 @@ public abstract class AbstractReadExecutor
         }
         targetReplicas.add(extraReplica);
 
-        if (retryType == RetryType.ALWAYS)
+        if (retry.equals(SpeculativeRetryParam.ALWAYS))
             return new AlwaysSpeculatingReadExecutor(keyspace, cfs, command, consistencyLevel, targetReplicas);
         else // PERCENTILE or CUSTOM.
             return new SpeculatingReadExecutor(keyspace, cfs, command, consistencyLevel, targetReplicas);
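
The executor choice above now hinges on value equality against the NONE/ALWAYS constants rather than on a nested enum. A minimal sketch of that dispatch, assuming SpeculativeRetryParam as introduced earlier in this patch; RetryDispatchSketch and the returned strings stand in for the real executor classes.

    import org.apache.cassandra.schema.SpeculativeRetryParam;

    public final class RetryDispatchSketch
    {
        static String choose(SpeculativeRetryParam retry)
        {
            if (retry.equals(SpeculativeRetryParam.NONE))
                return "never speculate";
            if (retry.equals(SpeculativeRetryParam.ALWAYS))
                return "always speculate";
            return "speculate past the configured percentile/latency"; // PERCENTILE or CUSTOM
        }

        public static void main(String[] args)
        {
            // equals() compares kind and threshold, so a value parsed back from the
            // schema matches the shared constants.
            System.out.println(choose(SpeculativeRetryParam.fromString("NONE")));
            System.out.println(choose(SpeculativeRetryParam.fromString("99PERCENTILE")));
        }
    }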

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/service/CacheService.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/CacheService.java b/src/java/org/apache/cassandra/service/CacheService.java
index 253e9e4..c9d9fa5 100644
--- a/src/java/org/apache/cassandra/service/CacheService.java
+++ b/src/java/org/apache/cassandra/service/CacheService.java
@@ -421,7 +421,7 @@ public class CacheService implements CacheServiceMBean
         public Future<Pair<RowCacheKey, IRowCacheEntry>> deserialize(DataInputPlus in, final ColumnFamilyStore cfs) throws IOException
         {
             final ByteBuffer buffer = ByteBufferUtil.readWithLength(in);
-            final int rowsToCache = cfs.metadata.getCaching().rowCache.rowsToCache;
+            final int rowsToCache = cfs.metadata.params.caching.rowsPerPartitionToCache();
 
             return StageManager.getStage(Stage.READ).submit(new Callable<Pair<RowCacheKey, IRowCacheEntry>>()
             {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/service/StorageService.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/service/StorageService.java b/src/java/org/apache/cassandra/service/StorageService.java
index 9ac94cc..f263a82 100644
--- a/src/java/org/apache/cassandra/service/StorageService.java
+++ b/src/java/org/apache/cassandra/service/StorageService.java
@@ -3277,7 +3277,7 @@ public class StorageService extends NotificationBroadcasterSupport implements IE
             Token token = tokens.get(index);
             Range<Token> range = new Range<>(prevToken, token);
             // always return an estimate > 0 (see CASSANDRA-7322)
-            splits.add(Pair.create(range, Math.max(cfs.metadata.getMinIndexInterval(), cfs.estimatedKeysForRange(range))));
+            splits.add(Pair.create(range, Math.max(cfs.metadata.params.minIndexInterval, cfs.estimatedKeysForRange(range))));
             prevToken = token;
         }
         return splits;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/streaming/compress/CompressionInfo.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/streaming/compress/CompressionInfo.java b/src/java/org/apache/cassandra/streaming/compress/CompressionInfo.java
index 924a656..bd0c2d5 100644
--- a/src/java/org/apache/cassandra/streaming/compress/CompressionInfo.java
+++ b/src/java/org/apache/cassandra/streaming/compress/CompressionInfo.java
@@ -22,7 +22,7 @@ import java.io.IOException;
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.compress.CompressionMetadata;
-import org.apache.cassandra.io.compress.CompressionParameters;
+import org.apache.cassandra.schema.CompressionParams;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
 
@@ -34,9 +34,9 @@ public class CompressionInfo
     public static final IVersionedSerializer<CompressionInfo> serializer = new CompressionInfoSerializer();
 
     public final CompressionMetadata.Chunk[] chunks;
-    public final CompressionParameters parameters;
+    public final CompressionParams parameters;
 
-    public CompressionInfo(CompressionMetadata.Chunk[] chunks, CompressionParameters parameters)
+    public CompressionInfo(CompressionMetadata.Chunk[] chunks, CompressionParams parameters)
     {
         assert chunks != null && parameters != null;
         this.chunks = chunks;
@@ -58,7 +58,7 @@ public class CompressionInfo
             for (int i = 0; i < chunkCount; i++)
                 CompressionMetadata.Chunk.serializer.serialize(info.chunks[i], out, version);
             // compression params
-            CompressionParameters.serializer.serialize(info.parameters, out, version);
+            CompressionParams.serializer.serialize(info.parameters, out, version);
         }
 
         public CompressionInfo deserialize(DataInputPlus in, int version) throws IOException
@@ -73,7 +73,7 @@ public class CompressionInfo
                 chunks[i] = CompressionMetadata.Chunk.serializer.deserialize(in, version);
 
             // compression params
-            CompressionParameters parameters = CompressionParameters.serializer.deserialize(in, version);
+            CompressionParams parameters = CompressionParams.serializer.deserialize(in, version);
             return new CompressionInfo(chunks, parameters);
         }
 
@@ -88,7 +88,7 @@ public class CompressionInfo
             for (int i = 0; i < chunkCount; i++)
                 size += CompressionMetadata.Chunk.serializer.serializedSize(info.chunks[i], version);
             // compression params
-            size += CompressionParameters.serializer.serializedSize(info.parameters, version);
+            size += CompressionParams.serializer.serializedSize(info.parameters, version);
             return size;
         }
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/thrift/CassandraServer.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/thrift/CassandraServer.java b/src/java/org/apache/cassandra/thrift/CassandraServer.java
index c679479..2f47452 100644
--- a/src/java/org/apache/cassandra/thrift/CassandraServer.java
+++ b/src/java/org/apache/cassandra/thrift/CassandraServer.java
@@ -1845,8 +1845,7 @@ public class CassandraServer implements Cassandra.Iface
         requestScheduler.release();
     }
 
-    public String system_add_column_family(CfDef cf_def)
-    throws InvalidRequestException, SchemaDisagreementException, TException
+    public String system_add_column_family(CfDef cf_def) throws TException
     {
         logger.debug("add_column_family");
 
@@ -1857,7 +1856,7 @@ public class CassandraServer implements Cassandra.Iface
             cState.hasKeyspaceAccess(keyspace, Permission.CREATE);
             cf_def.unsetId(); // explicitly ignore any id set by client (Hector likes to set zero)
             CFMetaData cfm = ThriftConversion.fromThrift(cf_def);
-            CFMetaData.validateCompactionOptions(cfm.compactionStrategyClass, cfm.compactionStrategyOptions);
+            cfm.params.compaction.validate();
             cfm.addDefaultIndexNames();
 
             if (!cfm.getTriggers().isEmpty())
@@ -2007,7 +2006,7 @@ public class CassandraServer implements Cassandra.Iface
                 throw new InvalidRequestException("Cannot modify CQL3 table " + oldCfm.cfName + " as it may break the schema. You should use cqlsh to modify CQL3 tables instead.");
 
             CFMetaData cfm = ThriftConversion.fromThriftForUpdate(cf_def, oldCfm);
-            CFMetaData.validateCompactionOptions(cfm.compactionStrategyClass, cfm.compactionStrategyOptions);
+            cfm.params.compaction.validate();
             cfm.addDefaultIndexNames();
 
             if (!oldCfm.getTriggers().equals(cfm.getTriggers()))

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/thrift/ThriftConversion.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/thrift/ThriftConversion.java b/src/java/org/apache/cassandra/thrift/ThriftConversion.java
index 36383e0..1744177 100644
--- a/src/java/org/apache/cassandra/thrift/ThriftConversion.java
+++ b/src/java/org/apache/cassandra/thrift/ThriftConversion.java
@@ -20,11 +20,10 @@ package org.apache.cassandra.thrift;
 import java.util.*;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Strings;
 import com.google.common.collect.Maps;
 
+import org.apache.cassandra.db.compaction.AbstractCompactionStrategy;
 import org.apache.cassandra.io.compress.ICompressor;
-import org.apache.cassandra.cache.CachingOptions;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.config.DatabaseDescriptor;
@@ -37,7 +36,7 @@ import org.apache.cassandra.db.WriteType;
 import org.apache.cassandra.db.filter.RowFilter;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.exceptions.*;
-import org.apache.cassandra.io.compress.CompressionParameters;
+import org.apache.cassandra.schema.CompressionParams;
 import org.apache.cassandra.locator.AbstractReplicationStrategy;
 import org.apache.cassandra.locator.LocalStrategy;
 import org.apache.cassandra.schema.*;
@@ -158,7 +157,7 @@ public class ThriftConversion
         Map<String, String> replicationMap = new HashMap<>();
         if (ksd.strategy_options != null)
             replicationMap.putAll(ksd.strategy_options);
-        replicationMap.put(KeyspaceParams.Replication.CLASS, cls.getName());
+        replicationMap.put(ReplicationParams.CLASS, cls.getName());
 
         return KeyspaceMetadata.create(ksd.name, KeyspaceParams.create(ksd.durable_writes, replicationMap), Tables.of(cfDefs));
     }
@@ -266,6 +265,7 @@ public class ThriftConversion
 
             // If it's a thrift table creation, adds the default CQL metadata for the new table
             if (isCreation)
+            {
                 addDefaultCQLMetadata(defs,
                                       cf_def.keyspace,
                                       cf_def.name,
@@ -273,6 +273,7 @@ public class ThriftConversion
                                       rawComparator,
                                       subComparator,
                                       defaultValidator);
+            }
 
             // We do not allow Thrift materialized views, so we always set it to false
             boolean isMaterializedView = false;
@@ -281,20 +282,15 @@ public class ThriftConversion
 
             if (cf_def.isSetGc_grace_seconds())
                 newCFMD.gcGraceSeconds(cf_def.gc_grace_seconds);
-            if (cf_def.isSetMin_compaction_threshold())
-                newCFMD.minCompactionThreshold(cf_def.min_compaction_threshold);
-            if (cf_def.isSetMax_compaction_threshold())
-                newCFMD.maxCompactionThreshold(cf_def.max_compaction_threshold);
-            if (cf_def.isSetCompaction_strategy())
-                newCFMD.compactionStrategyClass(CFMetaData.createCompactionStrategy(cf_def.compaction_strategy));
-            if (cf_def.isSetCompaction_strategy_options())
-                newCFMD.compactionStrategyOptions(new HashMap<>(cf_def.compaction_strategy_options));
+
+            newCFMD.compaction(compactionParamsFromThrift(cf_def));
+
             if (cf_def.isSetBloom_filter_fp_chance())
                 newCFMD.bloomFilterFpChance(cf_def.bloom_filter_fp_chance);
             if (cf_def.isSetMemtable_flush_period_in_ms())
                 newCFMD.memtableFlushPeriod(cf_def.memtable_flush_period_in_ms);
             if (cf_def.isSetCaching() || cf_def.isSetCells_per_row_to_cache())
-                newCFMD.caching(CachingOptions.fromThrift(cf_def.caching, cf_def.cells_per_row_to_cache));
+                newCFMD.caching(cachingFromThrift(cf_def.caching, cf_def.cells_per_row_to_cache));
             if (cf_def.isSetRead_repair_chance())
                 newCFMD.readRepairChance(cf_def.read_repair_chance);
             if (cf_def.isSetDefault_time_to_live())
@@ -306,12 +302,15 @@ public class ThriftConversion
             if (cf_def.isSetMax_index_interval())
                 newCFMD.maxIndexInterval(cf_def.max_index_interval);
             if (cf_def.isSetSpeculative_retry())
-                newCFMD.speculativeRetry(CFMetaData.SpeculativeRetry.fromString(cf_def.speculative_retry));
+                newCFMD.speculativeRetry(SpeculativeRetryParam.fromString(cf_def.speculative_retry));
             if (cf_def.isSetTriggers())
                 newCFMD.triggers(triggerDefinitionsFromThrift(cf_def.triggers));
+            if (cf_def.isSetComment())
+                newCFMD.comment(cf_def.comment);
+            if (cf_def.isSetCompression_options())
+                newCFMD.compression(compressionParametersFromThrift(cf_def.compression_options));
 
-            return newCFMD.comment(cf_def.comment)
-                          .compressionParameters(compressionParametersFromThrift(cf_def.compression_options));
+            return newCFMD;
         }
         catch (SyntaxException | MarshalException e)
         {
@@ -319,9 +318,28 @@ public class ThriftConversion
         }
     }
 
-    private static CompressionParameters compressionParametersFromThrift(Map<String, String> compression_options)
+    @SuppressWarnings("unchecked")
+    private static CompactionParams compactionParamsFromThrift(CfDef cf_def)
     {
-        CompressionParameters compressionParameter = CompressionParameters.fromMap(compression_options);
+        Class<? extends AbstractCompactionStrategy> klass =
+            CFMetaData.createCompactionStrategy(cf_def.compaction_strategy);
+        Map<String, String> options = new HashMap<>(cf_def.compaction_strategy_options);
+
+        int minThreshold = cf_def.min_compaction_threshold;
+        int maxThreshold = cf_def.max_compaction_threshold;
+
+        if (CompactionParams.supportsThresholdParams(klass))
+        {
+            options.putIfAbsent(CompactionParams.Option.MIN_THRESHOLD.toString(), Integer.toString(minThreshold));
+            options.putIfAbsent(CompactionParams.Option.MAX_THRESHOLD.toString(), Integer.toString(maxThreshold));
+        }
+
+        return CompactionParams.create(klass, options);
+    }
+
+    private static CompressionParams compressionParametersFromThrift(Map<String, String> compression_options)
+    {
+        CompressionParams compressionParameter = CompressionParams.fromMap(compression_options);
         compressionParameter.validate();
         return compressionParameter;
     }
@@ -375,19 +393,19 @@ public class ThriftConversion
         if (!cf_def.isSetComment())
             cf_def.setComment("");
         if (!cf_def.isSetMin_compaction_threshold())
-            cf_def.setMin_compaction_threshold(CFMetaData.DEFAULT_MIN_COMPACTION_THRESHOLD);
+            cf_def.setMin_compaction_threshold(CompactionParams.DEFAULT_MIN_THRESHOLD);
         if (!cf_def.isSetMax_compaction_threshold())
-            cf_def.setMax_compaction_threshold(CFMetaData.DEFAULT_MAX_COMPACTION_THRESHOLD);
-        if (cf_def.compaction_strategy == null)
-            cf_def.compaction_strategy = CFMetaData.DEFAULT_COMPACTION_STRATEGY_CLASS.getSimpleName();
-        if (cf_def.compaction_strategy_options == null)
-            cf_def.compaction_strategy_options = Collections.emptyMap();
+            cf_def.setMax_compaction_threshold(CompactionParams.DEFAULT_MAX_THRESHOLD);
+        if (!cf_def.isSetCompaction_strategy())
+            cf_def.setCompaction_strategy(CompactionParams.DEFAULT.klass().getSimpleName());
+        if (!cf_def.isSetCompaction_strategy_options())
+            cf_def.setCompaction_strategy_options(Collections.emptyMap());
         if (!cf_def.isSetCompression_options())
-            cf_def.setCompression_options(Collections.singletonMap(CompressionParameters.SSTABLE_COMPRESSION, CFMetaData.DEFAULT_COMPRESSOR));
+            cf_def.setCompression_options(Collections.singletonMap(CompressionParams.SSTABLE_COMPRESSION, CompressionParams.DEFAULT.klass().getCanonicalName()));
         if (!cf_def.isSetDefault_time_to_live())
-            cf_def.setDefault_time_to_live(CFMetaData.DEFAULT_DEFAULT_TIME_TO_LIVE);
+            cf_def.setDefault_time_to_live(TableParams.DEFAULT_DEFAULT_TIME_TO_LIVE);
         if (!cf_def.isSetDclocal_read_repair_chance())
-            cf_def.setDclocal_read_repair_chance(CFMetaData.DEFAULT_DCLOCAL_READ_REPAIR_CHANCE);
+            cf_def.setDclocal_read_repair_chance(TableParams.DEFAULT_DCLOCAL_READ_REPAIR_CHANCE);
 
         // if index_interval was set, use that for the min_index_interval default
         if (!cf_def.isSetMin_index_interval())
@@ -395,13 +413,13 @@ public class ThriftConversion
             if (cf_def.isSetIndex_interval())
                 cf_def.setMin_index_interval(cf_def.getIndex_interval());
             else
-                cf_def.setMin_index_interval(CFMetaData.DEFAULT_MIN_INDEX_INTERVAL);
+                cf_def.setMin_index_interval(TableParams.DEFAULT_MIN_INDEX_INTERVAL);
         }
 
         if (!cf_def.isSetMax_index_interval())
         {
             // ensure the max is at least as large as the min
-            cf_def.setMax_index_interval(Math.max(cf_def.min_index_interval, CFMetaData.DEFAULT_MAX_INDEX_INTERVAL));
+            cf_def.setMax_index_interval(Math.max(cf_def.min_index_interval, TableParams.DEFAULT_MAX_INDEX_INTERVAL));
         }
     }
 
@@ -420,29 +438,29 @@ public class ThriftConversion
             def.setComparator_type(LegacyLayout.makeLegacyComparator(cfm).toString());
         }
 
-        def.setComment(Strings.nullToEmpty(cfm.getComment()));
-        def.setRead_repair_chance(cfm.getReadRepairChance());
-        def.setDclocal_read_repair_chance(cfm.getDcLocalReadRepairChance());
-        def.setGc_grace_seconds(cfm.getGcGraceSeconds());
+        def.setComment(cfm.params.comment);
+        def.setRead_repair_chance(cfm.params.readRepairChance);
+        def.setDclocal_read_repair_chance(cfm.params.dcLocalReadRepairChance);
+        def.setGc_grace_seconds(cfm.params.gcGraceSeconds);
         def.setDefault_validation_class(cfm.makeLegacyDefaultValidator().toString());
         def.setKey_validation_class(cfm.getKeyValidator().toString());
-        def.setMin_compaction_threshold(cfm.getMinCompactionThreshold());
-        def.setMax_compaction_threshold(cfm.getMaxCompactionThreshold());
+        def.setMin_compaction_threshold(cfm.params.compaction.minCompactionThreshold());
+        def.setMax_compaction_threshold(cfm.params.compaction.maxCompactionThreshold());
         // We only return the alias if only one is set since thrift don't know about multiple key aliases
         if (cfm.partitionKeyColumns().size() == 1)
             def.setKey_alias(cfm.partitionKeyColumns().get(0).name.bytes);
         def.setColumn_metadata(columnDefinitionsToThrift(cfm, cfm.allColumns()));
-        def.setCompaction_strategy(cfm.compactionStrategyClass.getName());
-        def.setCompaction_strategy_options(new HashMap<>(cfm.compactionStrategyOptions));
-        def.setCompression_options(compressionParametersToThrift(cfm.compressionParameters));
-        def.setBloom_filter_fp_chance(cfm.getBloomFilterFpChance());
-        def.setMin_index_interval(cfm.getMinIndexInterval());
-        def.setMax_index_interval(cfm.getMaxIndexInterval());
-        def.setMemtable_flush_period_in_ms(cfm.getMemtableFlushPeriod());
-        def.setCaching(cfm.getCaching().toThriftCaching());
-        def.setCells_per_row_to_cache(cfm.getCaching().toThriftCellsPerRow());
-        def.setDefault_time_to_live(cfm.getDefaultTimeToLive());
-        def.setSpeculative_retry(cfm.getSpeculativeRetry().toString());
+        def.setCompaction_strategy(cfm.params.compaction.klass().getName());
+        def.setCompaction_strategy_options(cfm.params.compaction.options());
+        def.setCompression_options(compressionParametersToThrift(cfm.params.compression));
+        def.setBloom_filter_fp_chance(cfm.params.bloomFilterFpChance);
+        def.setMin_index_interval(cfm.params.minIndexInterval);
+        def.setMax_index_interval(cfm.params.maxIndexInterval);
+        def.setMemtable_flush_period_in_ms(cfm.params.memtableFlushPeriodInMs);
+        def.setCaching(toThrift(cfm.params.caching));
+        def.setCells_per_row_to_cache(toThriftCellsPerRow(cfm.params.caching));
+        def.setDefault_time_to_live(cfm.params.defaultTimeToLive);
+        def.setSpeculative_retry(cfm.params.speculativeRetry.toString());
         def.setTriggers(triggerDefinitionsToThrift(cfm.getTriggers()));
 
         return def;
@@ -543,15 +561,80 @@ public class ThriftConversion
     }
 
     @SuppressWarnings("deprecation")
-    public static Map<String, String> compressionParametersToThrift(CompressionParameters parameters)
+    public static Map<String, String> compressionParametersToThrift(CompressionParams parameters)
     {
         if (!parameters.isEnabled())
             return Collections.emptyMap();
 
         Map<String, String> options = new HashMap<>(parameters.getOtherOptions());
         Class<? extends ICompressor> klass = parameters.getSstableCompressor().getClass();
-        options.put(CompressionParameters.SSTABLE_COMPRESSION, klass.getName());
-        options.put(CompressionParameters.CHUNK_LENGTH_KB, parameters.chunkLengthInKB());
+        options.put(CompressionParams.SSTABLE_COMPRESSION, klass.getName());
+        options.put(CompressionParams.CHUNK_LENGTH_KB, parameters.chunkLengthInKB());
         return options;
     }
+
+    private static String toThrift(CachingParams caching)
+    {
+        if (caching.cacheRows() && caching.cacheKeys())
+            return "ALL";
+
+        if (caching.cacheRows())
+            return "ROWS_ONLY";
+
+        if (caching.cacheKeys())
+            return "KEYS_ONLY";
+
+        return "NONE";
+    }
+
+    private static CachingParams cachingFromThrift(String caching)
+    {
+        switch (caching.toUpperCase())
+        {
+            case "ALL":
+                return CachingParams.CACHE_EVERYTHING;
+            case "ROWS_ONLY":
+                return new CachingParams(false, Integer.MAX_VALUE);
+            case "KEYS_ONLY":
+                return CachingParams.CACHE_KEYS;
+            case "NONE":
+                return CachingParams.CACHE_NOTHING;
+            default:
+                throw new ConfigurationException(String.format("Invalid value %s for caching parameter", caching));
+        }
+    }
+
+    private static String toThriftCellsPerRow(CachingParams caching)
+    {
+        return caching.cacheAllRows()
+             ? "ALL"
+             : String.valueOf(caching.rowsPerPartitionToCache());
+    }
+
+    private static int fromThriftCellsPerRow(String value)
+    {
+        return "ALL".equals(value)
+             ? Integer.MAX_VALUE
+             : Integer.parseInt(value);
+    }
+
+    public static CachingParams cachingFromThrift(String caching, String cellsPerRow)
+    {
+        boolean cacheKeys = true;
+        int rowsPerPartitionToCache = 0;
+
+        // a caching string coming over Thrift is one of the legacy values: "ALL", "KEYS_ONLY", "ROWS_ONLY" or "NONE"
+        if (caching != null)
+        {
+            CachingParams parsed = cachingFromThrift(caching);
+            cacheKeys = parsed.cacheKeys();
+            rowsPerPartitionToCache = parsed.rowsPerPartitionToCache();
+        }
+
+        // cells_per_row from Thrift is either "ALL" or a cell count, and only applies when row caching is enabled
+        if (cellsPerRow != null && rowsPerPartitionToCache > 0)
+            rowsPerPartitionToCache = fromThriftCellsPerRow(cellsPerRow);
+
+        return new CachingParams(cacheKeys, rowsPerPartitionToCache);
+    }
 }
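
For reference, the new helpers above map the legacy Thrift caching strings onto CachingParams. A minimal sketch of how cachingFromThrift resolves the two legacy fields (illustrative only, not part of the patch; it assumes CACHE_KEYS caches no rows, which is what the code above relies on):

    // Hypothetical usage of ThriftConversion.cachingFromThrift introduced in this patch:
    CachingParams all  = ThriftConversion.cachingFromThrift("ALL", null);         // keys cached, all rows cached
    CachingParams keys = ThriftConversion.cachingFromThrift("KEYS_ONLY", "100");  // row cache disabled, so "100" is ignored
    CachingParams head = ThriftConversion.cachingFromThrift("ROWS_ONLY", "100");  // no key cache, first 100 rows per partition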

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/utils/FBUtilities.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/utils/FBUtilities.java b/src/java/org/apache/cassandra/utils/FBUtilities.java
index 8ce9455..9569619 100644
--- a/src/java/org/apache/cassandra/utils/FBUtilities.java
+++ b/src/java/org/apache/cassandra/utils/FBUtilities.java
@@ -19,17 +19,13 @@ package org.apache.cassandra.utils;
 
 import java.io.*;
 import java.lang.reflect.Field;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
 import java.math.BigInteger;
 import java.net.*;
 import java.nio.ByteBuffer;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
-import java.text.NumberFormat;
 import java.util.*;
 import java.util.concurrent.*;
-import java.util.zip.Adler32;
 import java.util.zip.Checksum;
 
 import com.google.common.base.Joiner;
@@ -48,7 +44,7 @@ import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.IVersionedSerializer;
-import org.apache.cassandra.io.compress.CompressionParameters;
+import org.apache.cassandra.schema.CompressionParams;
 import org.apache.cassandra.io.util.DataOutputBuffer;
 import org.apache.cassandra.io.util.DataOutputBufferFixed;
 import org.apache.cassandra.io.util.FileUtils;
@@ -604,7 +600,7 @@ public class FBUtilities
         @Override
         protected byte[] initialValue()
         {
-            return new byte[CompressionParameters.DEFAULT_CHUNK_LENGTH];
+            return new byte[CompressionParams.DEFAULT_CHUNK_LENGTH];
         }
     };
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/utils/NativeSSTableLoaderClient.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/utils/NativeSSTableLoaderClient.java b/src/java/org/apache/cassandra/utils/NativeSSTableLoaderClient.java
index 5063245..fb65e34 100644
--- a/src/java/org/apache/cassandra/utils/NativeSSTableLoaderClient.java
+++ b/src/java/org/apache/cassandra/utils/NativeSSTableLoaderClient.java
@@ -121,7 +121,7 @@ public class NativeSSTableLoaderClient extends SSTableLoader.Client
             boolean isCounter = flags.contains(CFMetaData.Flag.COUNTER);
             boolean isDense = flags.contains(CFMetaData.Flag.DENSE);
             boolean isCompound = flags.contains(CFMetaData.Flag.COMPOUND);
-            boolean isMaterializedView = flags.contains(CFMetaData.Flag.MATERIALIZEDVIEW);
+            boolean isMaterializedView = flags.contains(CFMetaData.Flag.VIEW);
 
             String columnsQuery = String.format("SELECT * FROM %s.%s WHERE keyspace_name = ? AND table_name = ?",
                                                 SchemaKeyspace.NAME,

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
----------------------------------------------------------------------
diff --git a/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java b/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
index 9383410..20faa98 100644
--- a/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
+++ b/test/long/org/apache/cassandra/db/compaction/LongCompactionsTest.java
@@ -36,6 +36,7 @@ import org.apache.cassandra.db.partitions.*;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.io.sstable.SSTableUtils;
+import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
@@ -49,13 +50,12 @@ public class LongCompactionsTest
     @BeforeClass
     public static void defineSchema() throws ConfigurationException
     {
-        Map<String, String> compactionOptions = new HashMap<>();
-        compactionOptions.put("tombstone_compaction_interval", "1");
+        Map<String, String> compactionOptions = Collections.singletonMap("tombstone_compaction_interval", "1");
         SchemaLoader.prepareServer();
         SchemaLoader.createKeyspace(KEYSPACE1,
                                     KeyspaceParams.simple(1),
                                     SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARD)
-                                                .compactionStrategyOptions(compactionOptions));
+                                                .compaction(CompactionParams.scts(compactionOptions)));
     }
 
     @Before
@@ -123,7 +123,7 @@ public class LongCompactionsTest
         Thread.sleep(1000);
 
         long start = System.nanoTime();
-        final int gcBefore = (int) (System.currentTimeMillis() / 1000) - Schema.instance.getCFMetaData(KEYSPACE1, "Standard1").getGcGraceSeconds();
+        final int gcBefore = (int) (System.currentTimeMillis() / 1000) - Schema.instance.getCFMetaData(KEYSPACE1, "Standard1").params.gcGraceSeconds;
         try (LifecycleTransaction txn = store.getTracker().tryModify(sstables, OperationType.COMPACTION))
         {
             assert txn != null : "Cannot markCompacting all sstables";
@@ -146,7 +146,7 @@ public class LongCompactionsTest
         cfs.clearUnsafe();
 
         final int ROWS_PER_SSTABLE = 10;
-        final int SSTABLES = cfs.metadata.getMinIndexInterval() * 3 / ROWS_PER_SSTABLE;
+        final int SSTABLES = cfs.metadata.params.minIndexInterval * 3 / ROWS_PER_SSTABLE;
 
         // disable compaction while flushing
         cfs.disableAutoCompaction();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java
----------------------------------------------------------------------
diff --git a/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java b/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java
index 97fd3b3..96ee072 100644
--- a/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java
+++ b/test/long/org/apache/cassandra/db/compaction/LongLeveledCompactionStrategyTest.java
@@ -30,6 +30,7 @@ import org.apache.cassandra.Util;
 import org.apache.cassandra.UpdateBuilder;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.FBUtilities;
 
@@ -47,8 +48,7 @@ public class LongLeveledCompactionStrategyTest
         SchemaLoader.createKeyspace(KEYSPACE1,
                                     KeyspaceParams.simple(1),
                                     SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARDLVL)
-                                                .compactionStrategyClass(LeveledCompactionStrategy.class)
-                                                .compactionStrategyOptions(leveledOptions));
+                                                .compaction(CompactionParams.lcs(leveledOptions)));
     }
 
     @Test

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/MockSchema.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/MockSchema.java b/test/unit/org/apache/cassandra/MockSchema.java
index 11892a8..249dd8d 100644
--- a/test/unit/org/apache/cassandra/MockSchema.java
+++ b/test/unit/org/apache/cassandra/MockSchema.java
@@ -26,7 +26,6 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import com.google.common.collect.ImmutableSet;
 
-import org.apache.cassandra.cache.CachingOptions;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.*;
@@ -45,6 +44,7 @@ import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.io.util.Memory;
 import org.apache.cassandra.io.util.RandomAccessReader;
 import org.apache.cassandra.io.util.SegmentedFile;
+import org.apache.cassandra.schema.CachingParams;
 import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.AlwaysPresentFilter;
@@ -150,7 +150,7 @@ public class MockSchema
                                                 .addRegularColumn("value", UTF8Type.instance)
                                                 .withPartitioner(Murmur3Partitioner.instance)
                                                 .build();
-        metadata.caching(CachingOptions.NONE);
+        metadata.caching(CachingParams.CACHE_NOTHING);
         return metadata;
     }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/SchemaLoader.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/SchemaLoader.java b/test/unit/org/apache/cassandra/SchemaLoader.java
index 59b66fe..ffe0ac8 100644
--- a/test/unit/org/apache/cassandra/SchemaLoader.java
+++ b/test/unit/org/apache/cassandra/SchemaLoader.java
@@ -24,21 +24,20 @@ import java.util.*;
 import org.junit.After;
 import org.junit.BeforeClass;
 
-import org.apache.cassandra.cache.CachingOptions;
 import org.apache.cassandra.config.*;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.commitlog.CommitLog;
-import org.apache.cassandra.db.compaction.LeveledCompactionStrategy;
 import org.apache.cassandra.db.index.PerRowSecondaryIndexTest;
 import org.apache.cassandra.db.index.SecondaryIndex;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.gms.Gossiper;
-import org.apache.cassandra.io.compress.CompressionParameters;
-import org.apache.cassandra.io.compress.SnappyCompressor;
+import org.apache.cassandra.schema.CompressionParams;
 import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.schema.CachingParams;
+import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.schema.Tables;
@@ -118,7 +117,7 @@ public class SchemaLoader
                 KeyspaceParams.simple(1),
                 Tables.of(
                 // Column Families
-                standardCFMD(ks1, "Standard1").compactionStrategyOptions(compactionOptions),
+                standardCFMD(ks1, "Standard1").compaction(CompactionParams.scts(compactionOptions)),
                 standardCFMD(ks1, "Standard2"),
                 standardCFMD(ks1, "Standard3"),
                 standardCFMD(ks1, "Standard4"),
@@ -147,15 +146,11 @@ public class SchemaLoader
                 //CFMetaData.Builder.create(ks1, "StandardComposite", false, true, false).withColumnNameComparator(composite).build(),
                 //CFMetaData.Builder.create(ks1, "StandardComposite2", false, true, false).withColumnNameComparator(compositeMaxMin).build(),
                 //CFMetaData.Builder.create(ks1, "StandardDynamicComposite", false, true, false).withColumnNameComparator(dynamicComposite).build(),
-                standardCFMD(ks1, "StandardLeveled")
-                        .compactionStrategyClass(LeveledCompactionStrategy.class)
-                        .compactionStrategyOptions(leveledOptions),
-                standardCFMD(ks1, "legacyleveled")
-                        .compactionStrategyClass(LeveledCompactionStrategy.class)
-                        .compactionStrategyOptions(leveledOptions),
+                standardCFMD(ks1, "StandardLeveled").compaction(CompactionParams.lcs(leveledOptions)),
+                standardCFMD(ks1, "legacyleveled").compaction(CompactionParams.lcs(leveledOptions)),
                 standardCFMD(ks1, "StandardLowIndexInterval").minIndexInterval(8)
-                        .maxIndexInterval(256)
-                        .caching(CachingOptions.NONE)
+                                                             .maxIndexInterval(256)
+                                                             .caching(CachingParams.CACHE_NOTHING)
                 //CFMetaData.Builder.create(ks1, "UUIDKeys").addPartitionKey("key",UUIDType.instance).build(),
                 //CFMetaData.Builder.create(ks1, "MixedTypes").withColumnNameComparator(LongType.instance).addPartitionKey("key", UUIDType.instance).build(),
                 //CFMetaData.Builder.create(ks1, "MixedTypesComposite", false, true, false).withColumnNameComparator(composite).addPartitionKey("key", composite).build(),
@@ -213,11 +208,10 @@ public class SchemaLoader
         schema.add(KeyspaceMetadata.create(ks_rcs,
                 KeyspaceParams.simple(1),
                 Tables.of(
-                standardCFMD(ks_rcs, "CFWithoutCache").caching(CachingOptions.NONE),
-                standardCFMD(ks_rcs, "CachedCF").caching(CachingOptions.ALL),
+                standardCFMD(ks_rcs, "CFWithoutCache").caching(CachingParams.CACHE_NOTHING),
+                standardCFMD(ks_rcs, "CachedCF").caching(CachingParams.CACHE_EVERYTHING),
                 standardCFMD(ks_rcs, "CachedIntCF").
-                        caching(new CachingOptions(new CachingOptions.KeyCache(CachingOptions.KeyCache.Type.ALL),
-                                new CachingOptions.RowCache(CachingOptions.RowCache.Type.HEAD, 100))))));
+                        caching(new CachingParams(true, 100)))));
 
         // CounterCacheSpace
         /*schema.add(KeyspaceMetadata.testMetadata(ks_ccs,
@@ -323,7 +317,7 @@ public class SchemaLoader
     {
         for (KeyspaceMetadata ksm : schema)
             for (CFMetaData cfm : ksm.tables)
-                cfm.compressionParameters(CompressionParameters.snappy());
+                cfm.compression(CompressionParams.snappy());
     }
 
     public static CFMetaData counterCFMD(String ksName, String cfName)
@@ -334,7 +328,7 @@ public class SchemaLoader
                 .addRegularColumn("val", CounterColumnType.instance)
                 .addRegularColumn("val2", CounterColumnType.instance)
                 .build()
-                .compressionParameters(getCompressionParameters());
+                .compression(getCompressionParameters());
     }
 
     public static CFMetaData standardCFMD(String ksName, String cfName)
@@ -363,7 +357,7 @@ public class SchemaLoader
             builder.addRegularColumn("val" + i, AsciiType.instance);
 
         return builder.build()
-               .compressionParameters(getCompressionParameters());
+               .compression(getCompressionParameters());
     }
 
     public static CFMetaData denseCFMD(String ksName, String cfName)
@@ -385,7 +379,7 @@ public class SchemaLoader
             .addClusteringColumn("cols", comp)
             .addRegularColumn("val", AsciiType.instance)
             .build()
-            .compressionParameters(getCompressionParameters());
+            .compression(getCompressionParameters());
     }
 
     // TODO: Fix superCFMD failing on legacy table creation. Seems to be applying composite comparator to partition key
@@ -424,7 +418,7 @@ public class SchemaLoader
             cfm.getColumnDefinition(new ColumnIdentifier("birthdate", true))
                .setIndex("birthdate_key_index", IndexType.COMPOSITES, Collections.EMPTY_MAP);
 
-        return cfm.compressionParameters(getCompressionParameters());
+        return cfm.compression(getCompressionParameters());
     }
     public static CFMetaData keysIndexCFMD(String ksName, String cfName, boolean withIndex) throws ConfigurationException
     {
@@ -440,27 +434,27 @@ public class SchemaLoader
             cfm.getColumnDefinition(new ColumnIdentifier("birthdate", true))
                .setIndex("birthdate_composite_index", IndexType.KEYS, Collections.EMPTY_MAP);
 
-        return cfm.compressionParameters(getCompressionParameters());
+        return cfm.compression(getCompressionParameters());
     }
     
     public static CFMetaData jdbcCFMD(String ksName, String cfName, AbstractType comp)
     {
         return CFMetaData.Builder.create(ksName, cfName).addPartitionKey("key", BytesType.instance)
                                                         .build()
-                                                        .compressionParameters(getCompressionParameters());
+                                                        .compression(getCompressionParameters());
     }
 
-    public static CompressionParameters getCompressionParameters()
+    public static CompressionParams getCompressionParameters()
     {
         return getCompressionParameters(null);
     }
 
-    public static CompressionParameters getCompressionParameters(Integer chunkSize)
+    public static CompressionParams getCompressionParameters(Integer chunkSize)
     {
         if (Boolean.parseBoolean(System.getProperty("cassandra.test.compression", "false")))
-            return CompressionParameters.snappy(chunkSize);
+            return CompressionParams.snappy(chunkSize);
 
-        return CompressionParameters.noCompression();
+        return CompressionParams.noCompression();
     }
 
     public static void cleanupAndLeaveDirs() throws IOException
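
Taken together, the SchemaLoader changes above replace the per-option CFMetaData setters with the new params objects. A condensed illustration of the style the updated tests use (names as they appear in this patch; the keyspace and table names are made up):

    // Illustrative only: building a test table with the new TableParams-based setters.
    CFMetaData table = SchemaLoader.standardCFMD("ks", "cf")
            .compaction(CompactionParams.lcs(Collections.singletonMap("sstable_size_in_mb", "1")))
            .caching(new CachingParams(true, 100))            // key cache on, first 100 rows per partition
            .compression(CompressionParams.snappy(32768));
    SchemaLoader.createKeyspace("ks", KeyspaceParams.simple(1), table);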

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/config/CFMetaDataTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/config/CFMetaDataTest.java b/test/unit/org/apache/cassandra/config/CFMetaDataTest.java
index ced6343..567d516 100644
--- a/test/unit/org/apache/cassandra/config/CFMetaDataTest.java
+++ b/test/unit/org/apache/cassandra/config/CFMetaDataTest.java
@@ -29,7 +29,7 @@ import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.db.rows.UnfilteredRowIterators;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.io.compress.*;
+import org.apache.cassandra.schema.CompressionParams;
 import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.schema.SchemaKeyspace;
@@ -125,7 +125,7 @@ public class CFMetaDataTest
 
                 // Testing with compression to catch #3558
                 CFMetaData withCompression = cfm.copy();
-                withCompression.compressionParameters(CompressionParameters.snappy(32768));
+                withCompression.compression(CompressionParams.snappy(32768));
                 checkInverses(withCompression);
             }
         }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java b/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
index 2c30b70..be3568a 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
@@ -461,7 +461,7 @@ public class SecondaryIndexTest extends CQLTester
     {
         String tableName = createTable("CREATE TABLE %s (k int PRIMARY KEY, v int,)");
 
-        execute("ALTER TABLE %s WITH CACHING='ALL'");
+        execute("ALTER TABLE %s WITH CACHING = { 'keys': 'ALL', 'rows_per_partition': 'ALL' }");
         execute("INSERT INTO %s (k,v) VALUES (0,0)");
         execute("INSERT INTO %s (k,v) VALUES (1,1)");
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/CrcCheckChanceTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/CrcCheckChanceTest.java b/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/CrcCheckChanceTest.java
index ff9d88b..8e1f438 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/CrcCheckChanceTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/CrcCheckChanceTest.java
@@ -52,9 +52,9 @@ public class CrcCheckChanceTest extends CQLTester
         ColumnFamilyStore indexCfs = cfs.indexManager.getIndexesBackedByCfs().iterator().next();
         cfs.forceBlockingFlush();
 
-        Assert.assertEquals(0.99, cfs.metadata.compressionParameters.getCrcCheckChance());
+        Assert.assertEquals(0.99, cfs.metadata.params.compression.getCrcCheckChance());
         Assert.assertEquals(0.99, cfs.getLiveSSTables().iterator().next().getCompressionMetadata().parameters.getCrcCheckChance());
-        Assert.assertEquals(0.99, indexCfs.metadata.compressionParameters.getCrcCheckChance());
+        Assert.assertEquals(0.99, indexCfs.metadata.params.compression.getCrcCheckChance());
         Assert.assertEquals(0.99, indexCfs.getLiveSSTables().iterator().next().getCompressionMetadata().parameters.getCrcCheckChance());
 
         //Test for stack overflow
@@ -95,9 +95,9 @@ public class CrcCheckChanceTest extends CQLTester
         //Verify when we alter the value the live sstable readers hold the new one
         alterTable("ALTER TABLE %s WITH compression = {'sstable_compression': 'LZ4Compressor', 'crc_check_chance': 0.01}");
 
-        Assert.assertEquals( 0.01, cfs.metadata.compressionParameters.getCrcCheckChance());
+        Assert.assertEquals( 0.01, cfs.metadata.params.compression.getCrcCheckChance());
         Assert.assertEquals( 0.01, cfs.getLiveSSTables().iterator().next().getCompressionMetadata().parameters.getCrcCheckChance());
-        Assert.assertEquals( 0.01, indexCfs.metadata.compressionParameters.getCrcCheckChance());
+        Assert.assertEquals( 0.01, indexCfs.metadata.params.compression.getCrcCheckChance());
         Assert.assertEquals( 0.01, indexCfs.getLiveSSTables().iterator().next().getCompressionMetadata().parameters.getCrcCheckChance());
 
         assertRows(execute("SELECT * FROM %s WHERE p=?", "p1"),
@@ -112,9 +112,9 @@ public class CrcCheckChanceTest extends CQLTester
 
         //Verify the call used by JMX still works
         cfs.setCrcCheckChance(0.03);
-        Assert.assertEquals( 0.03, cfs.metadata.compressionParameters.getCrcCheckChance());
+        Assert.assertEquals( 0.03, cfs.metadata.params.compression.getCrcCheckChance());
         Assert.assertEquals( 0.03, cfs.getLiveSSTables().iterator().next().getCompressionMetadata().parameters.getCrcCheckChance());
-        Assert.assertEquals( 0.03, indexCfs.metadata.compressionParameters.getCrcCheckChance());
+        Assert.assertEquals( 0.03, indexCfs.metadata.params.compression.getCrcCheckChance());
         Assert.assertEquals( 0.03, indexCfs.getLiveSSTables().iterator().next().getCompressionMetadata().parameters.getCrcCheckChance());
 
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/OverflowTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/OverflowTest.java b/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/OverflowTest.java
index 5b43599..9733eb2 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/OverflowTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/miscellaneous/OverflowTest.java
@@ -113,8 +113,8 @@ public class OverflowTest extends CQLTester
                     + "AND gc_grace_seconds = 4 "
                     + "AND bloom_filter_fp_chance = 0.01 "
                     + "AND compaction = { 'class' : 'LeveledCompactionStrategy', 'sstable_size_in_mb' : 10 } "
-                    + "AND compression = { 'sstable_compression' : '' } "
-                    + "AND caching = 'all' ");
+                    + "AND compression = { 'enabled': false } "
+                    + "AND caching = { 'keys': 'ALL', 'rows_per_partition': 'ALL' }");
 
         execute("ALTER TABLE %s WITH "
                 + "comment = 'other comment' "
@@ -123,8 +123,8 @@ public class OverflowTest extends CQLTester
                 + "AND gc_grace_seconds = 100 "
                 + "AND bloom_filter_fp_chance = 0.1 "
                 + "AND compaction = { 'class': 'SizeTieredCompactionStrategy', 'min_sstable_size' : 42 } "
-                + "AND compression = { 'sstable_compression' : 'SnappyCompressor' } "
-                + "AND caching = 'rows_only' ");
+                + "AND compression = { 'class' : 'SnappyCompressor' } "
+                + "AND caching = { 'rows_per_partition': 'ALL' }");
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/cql3/validation/operations/AlterTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/AlterTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/AlterTest.java
index 93f1973..b7f814b 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/AlterTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/AlterTest.java
@@ -112,12 +112,12 @@ public class AlterTest extends CQLTester
         ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(tableName);
 
         alterTable("ALTER TABLE %s WITH min_index_interval=256 AND max_index_interval=512");
-        assertEquals(256, cfs.metadata.getMinIndexInterval());
-        assertEquals(512, cfs.metadata.getMaxIndexInterval());
+        assertEquals(256, cfs.metadata.params.minIndexInterval);
+        assertEquals(512, cfs.metadata.params.maxIndexInterval);
 
-        alterTable("ALTER TABLE %s WITH caching = 'none'");
-        assertEquals(256, cfs.metadata.getMinIndexInterval());
-        assertEquals(512, cfs.metadata.getMaxIndexInterval());
+        alterTable("ALTER TABLE %s WITH caching = {}");
+        assertEquals(256, cfs.metadata.params.minIndexInterval);
+        assertEquals(512, cfs.metadata.params.maxIndexInterval);
     }
 
     /**
@@ -153,7 +153,9 @@ public class AlterTest extends CQLTester
 
         execute("CREATE TABLE cf1 (a int PRIMARY KEY, b int) WITH compaction = { 'class' : 'SizeTieredCompactionStrategy', 'min_threshold' : 7 }");
         assertRows(execute("SELECT table_name, compaction FROM system_schema.tables WHERE keyspace_name='ks1'"),
-                   row("cf1", map("class", "org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy", "min_threshold", "7")));
+                   row("cf1", map("class", "org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy",
+                                  "min_threshold", "7",
+                                  "max_threshold", "32")));
 
         // clean-up
         execute("DROP KEYSPACE ks1");

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/cql3/validation/operations/SelectOrderedPartitionerTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/cql3/validation/operations/SelectOrderedPartitionerTest.java b/test/unit/org/apache/cassandra/cql3/validation/operations/SelectOrderedPartitionerTest.java
index 8a2e1c9..5c587a8 100644
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/SelectOrderedPartitionerTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/SelectOrderedPartitionerTest.java
@@ -444,7 +444,7 @@ public class SelectOrderedPartitionerTest extends CQLTester
     @Test
     public void testTruncateWithCaching() throws Throwable
     {
-        createTable("CREATE TABLE %s (k int PRIMARY KEY, v1 int, v2 int,) WITH CACHING = ALL;");
+        createTable("CREATE TABLE %s (k int PRIMARY KEY, v1 int, v2 int) WITH CACHING = { 'keys': 'ALL', 'rows_per_partition': 'ALL' };");
 
         for (int i = 0; i < 3; i++)
             execute("INSERT INTO %s (k, v1, v2) VALUES (?, ?, ?)", i, i, i * 2);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/db/HintedHandOffTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/HintedHandOffTest.java b/test/unit/org/apache/cassandra/db/HintedHandOffTest.java
index dfa9e26..e06c95a 100644
--- a/test/unit/org/apache/cassandra/db/HintedHandOffTest.java
+++ b/test/unit/org/apache/cassandra/db/HintedHandOffTest.java
@@ -31,7 +31,6 @@ import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.cql3.UntypedResultSet;
-import org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy;
 import org.apache.cassandra.db.marshal.Int32Type;
 import org.apache.cassandra.db.marshal.UUIDType;
 import org.apache.cassandra.exceptions.ConfigurationException;
@@ -74,7 +73,6 @@ public class HintedHandOffTest
         ColumnFamilyStore hintStore = systemKeyspace.getColumnFamilyStore(SystemKeyspace.HINTS);
         hintStore.clearUnsafe();
         hintStore.metadata.gcGraceSeconds(36000); // 10 hours
-        hintStore.setCompactionStrategyClass(SizeTieredCompactionStrategy.class.getCanonicalName());
         hintStore.disableAutoCompaction();
 
         // insert 1 hint

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/db/RangeTombstoneTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RangeTombstoneTest.java b/test/unit/org/apache/cassandra/db/RangeTombstoneTest.java
index f5a9edf..c20fa46 100644
--- a/test/unit/org/apache/cassandra/db/RangeTombstoneTest.java
+++ b/test/unit/org/apache/cassandra/db/RangeTombstoneTest.java
@@ -38,7 +38,6 @@ import org.apache.cassandra.config.*;
 import org.apache.cassandra.Util;
 import org.apache.cassandra.UpdateBuilder;
 import org.apache.cassandra.db.compaction.CompactionManager;
-import org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy;
 import org.apache.cassandra.db.filter.*;
 import org.apache.cassandra.db.rows.*;
 import org.apache.cassandra.db.partitions.*;
@@ -467,7 +466,6 @@ public class RangeTombstoneTest
         // remove any existing sstables before starting
         cfs.truncateBlocking();
         cfs.disableAutoCompaction();
-        cfs.setCompactionStrategyClass(SizeTieredCompactionStrategy.class.getCanonicalName());
 
         UpdateBuilder builder = UpdateBuilder.create(cfs.metadata, key).withTimestamp(0);
         for (int i = 0; i < 10; i += 2)
@@ -512,7 +510,6 @@ public class RangeTombstoneTest
 
         cfs.truncateBlocking();
         cfs.disableAutoCompaction();
-        cfs.setCompactionStrategyClass(SizeTieredCompactionStrategy.class.getCanonicalName());
 
         ColumnDefinition cd = cfs.metadata.getColumnDefinition(indexedColumnName).copy();
         cd.setIndex("test_index", IndexType.CUSTOM, ImmutableMap.of(SecondaryIndex.CUSTOM_INDEX_OPTION_NAME, TestIndex.class.getName()));
@@ -549,8 +546,6 @@ public class RangeTombstoneTest
 
         cfs.truncateBlocking();
         cfs.disableAutoCompaction();
-        cfs.setCompactionStrategyClass(SizeTieredCompactionStrategy.class.getCanonicalName());
-
 
         ColumnDefinition cd = cfs.metadata.getColumnDefinition(indexedColumnName).copy();
         cd.setIndex("test_index", IndexType.CUSTOM, ImmutableMap.of(SecondaryIndex.CUSTOM_INDEX_OPTION_NAME, TestIndex.class.getName()));

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/db/RowCacheCQLTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RowCacheCQLTest.java b/test/unit/org/apache/cassandra/db/RowCacheCQLTest.java
index cb522d6..a3c0e25 100644
--- a/test/unit/org/apache/cassandra/db/RowCacheCQLTest.java
+++ b/test/unit/org/apache/cassandra/db/RowCacheCQLTest.java
@@ -30,7 +30,7 @@ public class RowCacheCQLTest extends CQLTester
     public void test7636() throws Throwable
     {
         CacheService.instance.setRowCacheCapacityInMB(1);
-        createTable("CREATE TABLE %s (p1 bigint, c1 int, v int, PRIMARY KEY (p1, c1)) WITH caching = '{\"keys\":\"NONE\", \"rows_per_partition\":\"ALL\"}'");
+        createTable("CREATE TABLE %s (p1 bigint, c1 int, v int, PRIMARY KEY (p1, c1)) WITH caching = { 'keys': 'NONE', 'rows_per_partition': 'ALL' }");
         execute("INSERT INTO %s (p1, c1, v) VALUES (?, ?, ?)", 123L, 10, 12);
         assertEmpty(execute("SELECT * FROM %s WHERE p1 = ? and c1 > ?", 123L, 1000));
         UntypedResultSet res = execute("SELECT * FROM %s WHERE p1 = ? and c1 > ?", 123L, 0);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/db/RowCacheTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RowCacheTest.java b/test/unit/org/apache/cassandra/db/RowCacheTest.java
index b89b792..be22b45 100644
--- a/test/unit/org/apache/cassandra/db/RowCacheTest.java
+++ b/test/unit/org/apache/cassandra/db/RowCacheTest.java
@@ -28,7 +28,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
-import org.apache.cassandra.cache.CachingOptions;
 import org.apache.cassandra.cache.RowCacheKey;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.Schema;
@@ -40,6 +39,7 @@ import org.apache.cassandra.db.partitions.CachedPartition;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.dht.ByteOrderedPartitioner.BytesToken;
 import org.apache.cassandra.locator.TokenMetadata;
+import org.apache.cassandra.schema.CachingParams;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.service.CacheService;
 import org.apache.cassandra.service.StorageService;
@@ -59,10 +59,9 @@ public class RowCacheTest
         SchemaLoader.prepareServer();
         SchemaLoader.createKeyspace(KEYSPACE_CACHED,
                                     KeyspaceParams.simple(1),
-                                    SchemaLoader.standardCFMD(KEYSPACE_CACHED, CF_CACHED).caching(CachingOptions.ALL),
+                                    SchemaLoader.standardCFMD(KEYSPACE_CACHED, CF_CACHED).caching(CachingParams.CACHE_EVERYTHING),
                                     SchemaLoader.standardCFMD(KEYSPACE_CACHED, CF_CACHEDINT, 1, IntegerType.instance)
-                                                .caching(new CachingOptions(new CachingOptions.KeyCache(CachingOptions.KeyCache.Type.ALL),
-                                                                            new CachingOptions.RowCache(CachingOptions.RowCache.Type.HEAD, 100))));
+                                                .caching(new CachingParams(true, 100)));
     }
 
     @AfterClass

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/db/ScrubTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/ScrubTest.java b/test/unit/org/apache/cassandra/db/ScrubTest.java
index 25b9cde..07bd22a 100644
--- a/test/unit/org/apache/cassandra/db/ScrubTest.java
+++ b/test/unit/org/apache/cassandra/db/ScrubTest.java
@@ -91,7 +91,7 @@ public class ScrubTest
                                     SchemaLoader.standardCFMD(KEYSPACE, CF2),
                                     SchemaLoader.standardCFMD(KEYSPACE, CF3),
                                     SchemaLoader.counterCFMD(KEYSPACE, COUNTER_CF)
-                                                .compressionParameters(SchemaLoader.getCompressionParameters(COMPRESSION_CHUNK_LENGTH)),
+                                                .compression(SchemaLoader.getCompressionParameters(COMPRESSION_CHUNK_LENGTH)),
                                     SchemaLoader.standardCFMD(KEYSPACE, CF_UUID, 0, UUIDType.instance),
                                     SchemaLoader.keysIndexCFMD(KEYSPACE, CF_INDEX1, true),
                                     SchemaLoader.compositeIndexCFMD(KEYSPACE, CF_INDEX2, true),

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/db/VerifyTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/VerifyTest.java b/test/unit/org/apache/cassandra/db/VerifyTest.java
index f460cb5..3bd4a47 100644
--- a/test/unit/org/apache/cassandra/db/VerifyTest.java
+++ b/test/unit/org/apache/cassandra/db/VerifyTest.java
@@ -29,11 +29,11 @@ import org.apache.cassandra.db.marshal.UUIDType;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.WriteTimeoutException;
 import org.apache.cassandra.io.FSWriteError;
-import org.apache.cassandra.io.compress.*;
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.schema.CompressionParams;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.commons.lang3.StringUtils;
@@ -70,19 +70,19 @@ public class VerifyTest
     @BeforeClass
     public static void defineSchema() throws ConfigurationException
     {
-        CompressionParameters compressionParameters = CompressionParameters.snappy(32768);
+        CompressionParams compressionParameters = CompressionParams.snappy(32768);
 
         SchemaLoader.loadSchema();
         SchemaLoader.createKeyspace(KEYSPACE,
                                     KeyspaceParams.simple(1),
-                                    SchemaLoader.standardCFMD(KEYSPACE, CF).compressionParameters(compressionParameters),
-                                    SchemaLoader.standardCFMD(KEYSPACE, CF2).compressionParameters(compressionParameters),
+                                    SchemaLoader.standardCFMD(KEYSPACE, CF).compression(compressionParameters),
+                                    SchemaLoader.standardCFMD(KEYSPACE, CF2).compression(compressionParameters),
                                     SchemaLoader.standardCFMD(KEYSPACE, CF3),
                                     SchemaLoader.standardCFMD(KEYSPACE, CF4),
                                     SchemaLoader.standardCFMD(KEYSPACE, CORRUPT_CF),
                                     SchemaLoader.standardCFMD(KEYSPACE, CORRUPT_CF2),
-                                    SchemaLoader.counterCFMD(KEYSPACE, COUNTER_CF).compressionParameters(compressionParameters),
-                                    SchemaLoader.counterCFMD(KEYSPACE, COUNTER_CF2).compressionParameters(compressionParameters),
+                                    SchemaLoader.counterCFMD(KEYSPACE, COUNTER_CF).compression(compressionParameters),
+                                    SchemaLoader.counterCFMD(KEYSPACE, COUNTER_CF2).compression(compressionParameters),
                                     SchemaLoader.counterCFMD(KEYSPACE, COUNTER_CF3),
                                     SchemaLoader.counterCFMD(KEYSPACE, COUNTER_CF4),
                                     SchemaLoader.counterCFMD(KEYSPACE, CORRUPTCOUNTER_CF),

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java
index 7fd3943..81693c7 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java
@@ -94,7 +94,7 @@ public class CommitLogUpgradeTest
                                                 .addClusteringColumn("col", AsciiType.instance)
                                                 .addRegularColumn("val", BytesType.instance)
                                                 .build()
-                                                .compressionParameters(SchemaLoader.getCompressionParameters());
+                                                .compression(SchemaLoader.getCompressionParameters());
         SchemaLoader.loadSchema();
         SchemaLoader.createKeyspace(KEYSPACE,
                                     KeyspaceParams.simple(1),

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java b/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java
index 73baeeb..6dc5f53 100644
--- a/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java
@@ -209,7 +209,6 @@ public class AntiCompactionTest
     {
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF);
-        store.setCompactionStrategyClass(compactionStrategy);
         store.disableAutoCompaction();
 
         for (int table = 0; table < 10; table++)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java b/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java
index c71463b..19d2347 100644
--- a/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java
@@ -96,9 +96,7 @@ public class BlacklistingCompactionsTest
         final ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Standard1");
 
         final int ROWS_PER_SSTABLE = 10;
-        final int SSTABLES = cfs.metadata.getMinIndexInterval() * 2 / ROWS_PER_SSTABLE;
-
-        cfs.setCompactionStrategyClass(compactionStrategy);
+        final int SSTABLES = cfs.metadata.params.minIndexInterval * 2 / ROWS_PER_SSTABLE;
 
         // disable compaction while flushing
         cfs.disableAutoCompaction();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/db/compaction/CompactionsPurgeTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionsPurgeTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionsPurgeTest.java
index f82a1ba..22f3c6b 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CompactionsPurgeTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CompactionsPurgeTest.java
@@ -26,7 +26,6 @@ import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
-import org.apache.cassandra.cache.CachingOptions;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.cql3.UntypedResultSet;
@@ -36,13 +35,13 @@ import org.apache.cassandra.db.partitions.ArrayBackedPartition;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.schema.CachingParams;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 
-import static org.apache.cassandra.Util.dk;
 import static org.junit.Assert.*;
-
+import static org.apache.cassandra.Util.dk;
 
 public class CompactionsPurgeTest
 {
@@ -68,7 +67,7 @@ public class CompactionsPurgeTest
                                     SchemaLoader.standardCFMD(KEYSPACE2, CF_STANDARD1));
         SchemaLoader.createKeyspace(KEYSPACE_CACHED,
                                     KeyspaceParams.simple(1),
-                                    SchemaLoader.standardCFMD(KEYSPACE_CACHED, CF_CACHED).caching(CachingOptions.ALL));
+                                    SchemaLoader.standardCFMD(KEYSPACE_CACHED, CF_CACHED).caching(CachingParams.CACHE_EVERYTHING));
         SchemaLoader.createKeyspace(KEYSPACE_CQL,
                                     KeyspaceParams.simple(1),
                                     CFMetaData.compile("CREATE TABLE " + CF_CQL + " ("

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java
index 700bc3e..091bc03 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CompactionsTest.java
@@ -35,6 +35,7 @@ import org.apache.cassandra.db.marshal.AsciiType;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.dht.*;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
+import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
@@ -63,14 +64,17 @@ public class CompactionsTest
 
         SchemaLoader.createKeyspace(KEYSPACE1,
                                     KeyspaceParams.simple(1),
-                                    SchemaLoader.denseCFMD(KEYSPACE1, CF_DENSE1).compactionStrategyOptions(compactionOptions),
-                                    SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARD1).compactionStrategyOptions(compactionOptions),
+                                    SchemaLoader.denseCFMD(KEYSPACE1, CF_DENSE1)
+                                                .compaction(CompactionParams.scts(compactionOptions)),
+                                    SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARD1)
+                                                .compaction(CompactionParams.scts(compactionOptions)),
                                     SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARD2),
                                     SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARD3),
                                     SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARD4),
                                     SchemaLoader.superCFMD(KEYSPACE1, CF_SUPER1, AsciiType.instance),
                                     SchemaLoader.superCFMD(KEYSPACE1, CF_SUPER5, AsciiType.instance),
-                                    SchemaLoader.superCFMD(KEYSPACE1, CF_SUPERGC, AsciiType.instance).gcGraceSeconds(0));
+                                    SchemaLoader.superCFMD(KEYSPACE1, CF_SUPERGC, AsciiType.instance)
+                                                .gcGraceSeconds(0));
     }
 
     public ColumnFamilyStore testSingleSSTableCompaction(String strategyClassName) throws Exception
@@ -79,7 +83,6 @@ public class CompactionsTest
         ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF_DENSE1);
         store.clearUnsafe();
         store.metadata.gcGraceSeconds(1);
-        store.setCompactionStrategyClass(strategyClassName);
 
         // disable compaction while flushing
         store.disableAutoCompaction();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java b/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java
index a589b0f..c3be08a 100644
--- a/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/LeveledCompactionStrategyTest.java
@@ -20,10 +20,9 @@ package org.apache.cassandra.db.compaction;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.HashMap;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Map;
 import java.util.Random;
 import java.util.UUID;
 
@@ -48,6 +47,7 @@ import org.apache.cassandra.notifications.SSTableAddedNotification;
 import org.apache.cassandra.notifications.SSTableRepairStatusChanged;
 import org.apache.cassandra.repair.RepairJobDesc;
 import org.apache.cassandra.repair.Validator;
+import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.service.ActiveRepairService;
 import org.apache.cassandra.utils.FBUtilities;
@@ -67,14 +67,11 @@ public class LeveledCompactionStrategyTest
     @BeforeClass
     public static void defineSchema() throws ConfigurationException
     {
-        Map<String, String> leveledOptions = new HashMap<>();
-        leveledOptions.put("sstable_size_in_mb", "1");
         SchemaLoader.prepareServer();
         SchemaLoader.createKeyspace(KEYSPACE1,
                                     KeyspaceParams.simple(1),
                                     SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARDDLEVELED)
-                                                .compactionStrategyClass(LeveledCompactionStrategy.class)
-                                                .compactionStrategyOptions(leveledOptions));
+                                                .compaction(CompactionParams.lcs(Collections.singletonMap("sstable_size_in_mb", "1"))));
         }
 
     @Before

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/db/compaction/OneCompactionTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/OneCompactionTest.java b/test/unit/org/apache/cassandra/db/compaction/OneCompactionTest.java
index fa3e6e3..f55bf52 100644
--- a/test/unit/org/apache/cassandra/db/compaction/OneCompactionTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/OneCompactionTest.java
@@ -17,9 +17,8 @@
  */
 package org.apache.cassandra.db.compaction;
 
-import java.util.HashMap;
+import java.util.Collections;
 import java.util.HashSet;
-import java.util.Map;
 import java.util.Set;
 
 import org.junit.BeforeClass;
@@ -31,6 +30,7 @@ import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.RowUpdateBuilder;
 import org.apache.cassandra.exceptions.ConfigurationException;
+import org.apache.cassandra.schema.CompactionParams;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
@@ -46,12 +46,11 @@ public class OneCompactionTest
     @BeforeClass
     public static void defineSchema() throws ConfigurationException
     {
-        Map<String, String> leveledOptions = new HashMap<>();
-        leveledOptions.put("sstable_size_in_mb", "1");
         SchemaLoader.prepareServer();
         SchemaLoader.createKeyspace(KEYSPACE1,
                                     KeyspaceParams.simple(1),
-                                    SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARD1).compactionStrategyOptions(leveledOptions),
+                                    SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARD1)
+                                                .compaction(CompactionParams.lcs(Collections.singletonMap("sstable_size_in_mb", "1"))),
                                     SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARD2));
     }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyTest.java b/test/unit/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyTest.java
index 3cefd49..f4dbea8 100644
--- a/test/unit/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/SizeTieredCompactionStrategyTest.java
@@ -53,13 +53,10 @@ public class SizeTieredCompactionStrategyTest
     @BeforeClass
     public static void defineSchema() throws ConfigurationException
     {
-        Map<String, String> leveledOptions = new HashMap<>();
-        leveledOptions.put("sstable_size_in_mb", "1");
         SchemaLoader.prepareServer();
         SchemaLoader.createKeyspace(KEYSPACE1,
                                     KeyspaceParams.simple(1),
-                                    SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARD1)
-                                                .compactionStrategyOptions(leveledOptions));
+                                    SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARD1));
     }
 
     @Test

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/db/view/MaterializedViewUtilsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/view/MaterializedViewUtilsTest.java b/test/unit/org/apache/cassandra/db/view/MaterializedViewUtilsTest.java
index 299c911..2544714 100644
--- a/test/unit/org/apache/cassandra/db/view/MaterializedViewUtilsTest.java
+++ b/test/unit/org/apache/cassandra/db/view/MaterializedViewUtilsTest.java
@@ -1,4 +1,3 @@
-
 /*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -38,6 +37,7 @@ import org.apache.cassandra.locator.PropertyFileSnitch;
 import org.apache.cassandra.locator.TokenMetadata;
 import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.schema.ReplicationParams;
 import org.apache.cassandra.service.StorageService;
 
 public class MaterializedViewUtilsTest
@@ -65,7 +65,7 @@ public class MaterializedViewUtilsTest
         metadata.updateNormalToken(new StringToken("D"), InetAddress.getByName("127.0.0.5"));
 
         Map<String, String> replicationMap = new HashMap<>();
-        replicationMap.put(KeyspaceParams.Replication.CLASS, NetworkTopologyStrategy.class.getName());
+        replicationMap.put(ReplicationParams.CLASS, NetworkTopologyStrategy.class.getName());
 
         replicationMap.put("DC1", "1");
         replicationMap.put("DC2", "1");
@@ -97,7 +97,7 @@ public class MaterializedViewUtilsTest
         metadata.updateNormalToken(new StringToken("D"), InetAddress.getByName("127.0.0.5"));
 
         Map<String, String> replicationMap = new HashMap<>();
-        replicationMap.put(KeyspaceParams.Replication.CLASS, NetworkTopologyStrategy.class.getName());
+        replicationMap.put(ReplicationParams.CLASS, NetworkTopologyStrategy.class.getName());
 
         replicationMap.put("DC1", "2");
         replicationMap.put("DC2", "2");

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java b/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
index 3966342..cc76a9e 100644
--- a/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/compress/CompressedRandomAccessReaderTest.java
@@ -21,7 +21,6 @@ package org.apache.cassandra.io.compress;
 import java.io.File;
 import java.io.IOException;
 import java.io.RandomAccessFile;
-import java.util.Collections;
 import java.util.Random;
 
 import org.junit.Test;
@@ -35,6 +34,7 @@ import org.apache.cassandra.io.util.ChannelProxy;
 import org.apache.cassandra.io.util.FileMark;
 import org.apache.cassandra.io.util.RandomAccessReader;
 import org.apache.cassandra.io.util.SequentialWriter;
+import org.apache.cassandra.schema.CompressionParams;
 import org.apache.cassandra.utils.SyncUtil;
 
 import static org.junit.Assert.assertEquals;
@@ -47,7 +47,7 @@ public class CompressedRandomAccessReaderTest
     {
         // test reset in current buffer or previous one
         testResetAndTruncate(File.createTempFile("normal", "1"), false, 10);
-        testResetAndTruncate(File.createTempFile("normal", "2"), false, CompressionParameters.DEFAULT_CHUNK_LENGTH);
+        testResetAndTruncate(File.createTempFile("normal", "2"), false, CompressionParams.DEFAULT_CHUNK_LENGTH);
     }
 
     @Test
@@ -55,7 +55,7 @@ public class CompressedRandomAccessReaderTest
     {
         // test reset in current buffer or previous one
         testResetAndTruncate(File.createTempFile("compressed", "1"), true, 10);
-        testResetAndTruncate(File.createTempFile("compressed", "2"), true, CompressionParameters.DEFAULT_CHUNK_LENGTH);
+        testResetAndTruncate(File.createTempFile("compressed", "2"), true, CompressionParams.DEFAULT_CHUNK_LENGTH);
     }
 
     @Test
@@ -68,7 +68,7 @@ public class CompressedRandomAccessReaderTest
         {
 
             MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
-            CompressedSequentialWriter writer = new CompressedSequentialWriter(f, filename + ".metadata",  CompressionParameters.snappy(32), sstableMetadataCollector);
+            CompressedSequentialWriter writer = new CompressedSequentialWriter(f, filename + ".metadata", CompressionParams.snappy(32), sstableMetadataCollector);
 
             for (int i = 0; i < 20; i++)
                 writer.write("x".getBytes());
@@ -110,7 +110,7 @@ public class CompressedRandomAccessReaderTest
         {
             MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance)).replayPosition(null);
             SequentialWriter writer = compressed
-                ? new CompressedSequentialWriter(f, filename + ".metadata", CompressionParameters.snappy(), sstableMetadataCollector)
+                ? new CompressedSequentialWriter(f, filename + ".metadata", CompressionParams.snappy(), sstableMetadataCollector)
                 : SequentialWriter.open(f);
 
             writer.write("The quick ".getBytes());
@@ -162,7 +162,7 @@ public class CompressedRandomAccessReaderTest
         metadata.deleteOnExit();
 
         MetadataCollector sstableMetadataCollector = new MetadataCollector(new ClusteringComparator(BytesType.instance)).replayPosition(null);
-        try (SequentialWriter writer = new CompressedSequentialWriter(file, metadata.getPath(), CompressionParameters.snappy(), sstableMetadataCollector))
+        try (SequentialWriter writer = new CompressedSequentialWriter(file, metadata.getPath(), CompressionParams.snappy(), sstableMetadataCollector))
         {
             writer.write(CONTENT.getBytes());
             writer.finish();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java b/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
index e5a7499..db99317 100644
--- a/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/compress/CompressedSequentialWriterTest.java
@@ -40,10 +40,11 @@ import org.apache.cassandra.io.util.ChannelProxy;
 import org.apache.cassandra.io.util.FileMark;
 import org.apache.cassandra.io.util.RandomAccessReader;
 import org.apache.cassandra.io.util.SequentialWriterTest;
+import org.apache.cassandra.schema.CompressionParams;
 
 public class CompressedSequentialWriterTest extends SequentialWriterTest
 {
-    private CompressionParameters compressionParameters;
+    private CompressionParams compressionParameters;
 
     private void runTests(String testName) throws IOException
     {
@@ -51,30 +52,30 @@ public class CompressedSequentialWriterTest extends SequentialWriterTest
         testWrite(File.createTempFile(testName + "_small", "1"), 25);
 
         // Test to confirm pipeline w/chunk-aligned data writes works
-        testWrite(File.createTempFile(testName + "_chunkAligned", "1"), CompressionParameters.DEFAULT_CHUNK_LENGTH);
+        testWrite(File.createTempFile(testName + "_chunkAligned", "1"), CompressionParams.DEFAULT_CHUNK_LENGTH);
 
         // Test to confirm pipeline on non-chunk boundaries works
-        testWrite(File.createTempFile(testName + "_large", "1"), CompressionParameters.DEFAULT_CHUNK_LENGTH * 3 + 100);
+        testWrite(File.createTempFile(testName + "_large", "1"), CompressionParams.DEFAULT_CHUNK_LENGTH * 3 + 100);
     }
 
     @Test
     public void testLZ4Writer() throws IOException
     {
-        compressionParameters = CompressionParameters.lz4();
+        compressionParameters = CompressionParams.lz4();
         runTests("LZ4");
     }
 
     @Test
     public void testDeflateWriter() throws IOException
     {
-        compressionParameters = CompressionParameters.deflate();
+        compressionParameters = CompressionParams.deflate();
         runTests("Deflate");
     }
 
     @Test
     public void testSnappyWriter() throws IOException
     {
-        compressionParameters = CompressionParameters.snappy();
+        compressionParameters = CompressionParams.snappy();
         runTests("Snappy");
     }
 
@@ -104,7 +105,7 @@ public class CompressedSequentialWriterTest extends SequentialWriterTest
                 FileMark mark = writer.mark();
 
                 // Write enough garbage to transition chunk
-                for (int i = 0; i < CompressionParameters.DEFAULT_CHUNK_LENGTH; i++)
+                for (int i = 0; i < CompressionParams.DEFAULT_CHUNK_LENGTH; i++)
                 {
                     writer.write((byte)i);
                 }
@@ -177,7 +178,7 @@ public class CompressedSequentialWriterTest extends SequentialWriterTest
         {
             this(file, offsetsFile, new CompressedSequentialWriter(file,
                                                                    offsetsFile.getPath(),
-                                                                   CompressionParameters.lz4(BUFFER_SIZE),
+                                                                   CompressionParams.lz4(BUFFER_SIZE),
                                                                    new MetadataCollector(new ClusteringComparator(UTF8Type.instance))));
         }
 


[5/5] cassandra git commit: Factor out TableParams from CFMetaData

Posted by al...@apache.org.
Factor out TableParams from CFMetaData

patch by Aleksey Yeschenko; reviewed by Robert Stupp for CASSANDRA-9712


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/b31845c4
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/b31845c4
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/b31845c4

Branch: refs/heads/cassandra-3.0
Commit: b31845c4a7982358a7c5bfd9bcf572fda6c1bfa9
Parents: 6932bd8
Author: Aleksey Yeschenko <al...@apache.org>
Authored: Sat Jul 18 01:59:00 2015 +0300
Committer: Aleksey Yeschenko <al...@apache.org>
Committed: Tue Aug 4 12:12:34 2015 +0300

----------------------------------------------------------------------
 NEWS.txt                                        |   8 +-
 .../apache/cassandra/cache/CachingOptions.java  | 291 ----------
 .../org/apache/cassandra/config/CFMetaData.java | 540 +++++------------
 src/java/org/apache/cassandra/cql3/Cql.g        |  12 +-
 .../AlterMaterializedViewStatement.java         |  16 +-
 .../cql3/statements/AlterTableStatement.java    |  19 +-
 .../cassandra/cql3/statements/CFPropDefs.java   | 222 -------
 .../cassandra/cql3/statements/CFProperties.java |   4 +-
 .../cql3/statements/CreateTableStatement.java   |  56 +-
 .../cql3/statements/KeyspaceAttributes.java     |  36 +-
 .../cql3/statements/TableAttributes.java        | 153 +++++
 .../apache/cassandra/db/ColumnFamilyStore.java  |  71 +--
 .../cassandra/db/HintedHandOffManager.java      |   2 +-
 .../org/apache/cassandra/db/LivenessInfo.java   |   2 +-
 src/java/org/apache/cassandra/db/Memtable.java  |   2 +-
 .../apache/cassandra/db/RowUpdateBuilder.java   |   2 +-
 .../db/SinglePartitionReadCommand.java          |   4 +-
 .../org/apache/cassandra/db/SystemKeyspace.java |  21 +-
 .../cassandra/db/commitlog/CommitLog.java       |   4 +-
 .../db/commitlog/CommitLogArchiver.java         |   5 +-
 .../db/commitlog/CommitLogReplayer.java         |   4 +-
 .../db/compaction/CompactionManager.java        |   4 +-
 .../compaction/CompactionStrategyManager.java   |  36 +-
 .../DateTieredCompactionStrategy.java           |  12 +-
 .../cassandra/db/compaction/Scrubber.java       |   2 +-
 .../SizeTieredCompactionStrategy.java           |  14 +-
 .../cassandra/db/compaction/Upgrader.java       |   2 +-
 .../apache/cassandra/db/rows/BufferCell.java    |   4 +-
 .../cassandra/db/view/MaterializedView.java     |   5 +-
 .../dht/OrderPreservingPartitioner.java         |   2 +-
 .../apache/cassandra/hadoop/ConfigHelper.java   |   6 +-
 .../io/compress/CompressedSequentialWriter.java |   4 +-
 .../io/compress/CompressionMetadata.java        |  15 +-
 .../io/compress/CompressionParameters.java      | 564 ------------------
 .../cassandra/io/compress/LZ4Compressor.java    |   3 +-
 .../io/sstable/IndexSummaryManager.java         |   4 +-
 .../io/sstable/format/SSTableReader.java        |  26 +-
 .../io/sstable/format/SSTableWriter.java        |   6 +-
 .../io/sstable/format/big/BigTableWriter.java   |   6 +-
 .../cassandra/io/util/SequentialWriter.java     |   4 +-
 .../apache/cassandra/schema/CachingParams.java  | 196 +++++++
 .../cassandra/schema/CompactionParams.java      | 304 ++++++++++
 .../cassandra/schema/CompressionParams.java     | 579 +++++++++++++++++++
 .../apache/cassandra/schema/KeyspaceParams.java |  95 +--
 .../cassandra/schema/LegacySchemaMigrator.java  | 122 ++--
 .../cassandra/schema/ReplicationParams.java     | 106 ++++
 .../apache/cassandra/schema/SchemaKeyspace.java | 142 ++---
 .../cassandra/schema/SpeculativeRetryParam.java | 160 +++++
 .../apache/cassandra/schema/TableParams.java    | 338 +++++++++++
 .../cassandra/service/AbstractReadExecutor.java |   9 +-
 .../apache/cassandra/service/CacheService.java  |   2 +-
 .../cassandra/service/StorageService.java       |   2 +-
 .../streaming/compress/CompressionInfo.java     |  12 +-
 .../cassandra/thrift/CassandraServer.java       |   7 +-
 .../cassandra/thrift/ThriftConversion.java      | 181 ++++--
 .../org/apache/cassandra/utils/FBUtilities.java |   8 +-
 .../utils/NativeSSTableLoaderClient.java        |   2 +-
 .../db/compaction/LongCompactionsTest.java      |  10 +-
 .../LongLeveledCompactionStrategyTest.java      |   4 +-
 test/unit/org/apache/cassandra/MockSchema.java  |   4 +-
 .../unit/org/apache/cassandra/SchemaLoader.java |  50 +-
 .../apache/cassandra/config/CFMetaDataTest.java |   4 +-
 .../validation/entities/SecondaryIndexTest.java |   2 +-
 .../miscellaneous/CrcCheckChanceTest.java       |  12 +-
 .../validation/miscellaneous/OverflowTest.java  |   8 +-
 .../cql3/validation/operations/AlterTest.java   |  14 +-
 .../SelectOrderedPartitionerTest.java           |   2 +-
 .../apache/cassandra/db/HintedHandOffTest.java  |   2 -
 .../apache/cassandra/db/RangeTombstoneTest.java |   5 -
 .../apache/cassandra/db/RowCacheCQLTest.java    |   2 +-
 .../org/apache/cassandra/db/RowCacheTest.java   |   7 +-
 .../unit/org/apache/cassandra/db/ScrubTest.java |   2 +-
 .../org/apache/cassandra/db/VerifyTest.java     |  12 +-
 .../db/commitlog/CommitLogUpgradeTest.java      |   2 +-
 .../db/compaction/AntiCompactionTest.java       |   1 -
 .../compaction/BlacklistingCompactionsTest.java |   4 +-
 .../db/compaction/CompactionsPurgeTest.java     |   7 +-
 .../db/compaction/CompactionsTest.java          |  11 +-
 .../LeveledCompactionStrategyTest.java          |   9 +-
 .../db/compaction/OneCompactionTest.java        |   9 +-
 .../SizeTieredCompactionStrategyTest.java       |   5 +-
 .../db/view/MaterializedViewUtilsTest.java      |   6 +-
 .../CompressedRandomAccessReaderTest.java       |  12 +-
 .../CompressedSequentialWriterTest.java         |  17 +-
 .../io/sstable/IndexSummaryManagerTest.java     |  49 +-
 .../cassandra/io/sstable/SSTableReaderTest.java |   6 +-
 .../org/apache/cassandra/schema/DefsTest.java   |  23 +-
 .../schema/LegacySchemaMigratorTest.java        |  61 +-
 .../cassandra/schema/SchemaKeyspaceTest.java    |   6 +-
 .../service/StorageServiceServerTest.java       |  13 +-
 .../compression/CompressedInputStreamTest.java  |   5 +-
 91 files changed, 2625 insertions(+), 2219 deletions(-)
----------------------------------------------------------------------
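
For orientation, a minimal sketch of the consolidated table-options API this patch introduces, assembled from the builder calls and constants visible in the diffs below. The class name TableParamsExample and the concrete option values are made up for illustration; the builder methods, constants, and factory methods are the ones added by this commit.

    import java.util.Collections;

    import org.apache.cassandra.schema.CachingParams;
    import org.apache.cassandra.schema.CompactionParams;
    import org.apache.cassandra.schema.CompressionParams;
    import org.apache.cassandra.schema.TableParams;

    public class TableParamsExample
    {
        public static TableParams example()
        {
            // All per-table options now live in a single immutable TableParams value
            // built via TableParams.builder(...), rather than in the individual
            // CFMetaData fields and setters removed by this patch.
            return TableParams.builder(TableParams.DEFAULT)
                              .comment("example table")      // hypothetical values, for illustration only
                              .gcGraceSeconds(864000)
                              .caching(CachingParams.CACHE_KEYS)
                              .compaction(CompactionParams.lcs(Collections.singletonMap("sstable_size_in_mb", "160")))
                              .compression(CompressionParams.lz4())
                              .build();
        }
    }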


http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/NEWS.txt
----------------------------------------------------------------------
diff --git a/NEWS.txt b/NEWS.txt
index 0e0d7c4..1fcbb12 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -21,10 +21,10 @@ New features
    - Materialized Views, which allow for server-side denormalization, is now
      available. Materialized views provide an alternative to secondary indexes
      for non-primary key queries, and perform much better for indexing high
-     cardinality columns. 
-     See http://www.datastax.com/dev/blog/new-in-cassandra-3-0-materialized-views 
+     cardinality columns.
+     See http://www.datastax.com/dev/blog/new-in-cassandra-3-0-materialized-views
+
 
-   
 Upgrading
 ---------
    - 3.0 requires Java 8u20 or later.
@@ -56,6 +56,8 @@ Upgrading
    - The `sstable_compression` and `chunk_length_kb` compression options have been deprecated.
      The new options are `class` and `chunk_length_in_kb`. Disabling compression should now
      be done by setting the new option `enabled` to `false`.
+   - Only map syntax is now allowed for caching options. ALL/NONE/KEYS_ONLY/ROWS_ONLY syntax
+     has been deprecated since 2.1.0 and is being removed in 3.0.0.
 
 
 2.2
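
For reference, the map syntax that remains supported looks like the statement below; the key names and accepted values ('ALL', 'NONE', or a number of rows) match the CQL comment in the removed CachingOptions class further down, and ks.tbl is a placeholder name.

    ALTER TABLE ks.tbl WITH caching = { 'keys' : 'ALL', 'rows_per_partition' : 'NONE' };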

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/cache/CachingOptions.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cache/CachingOptions.java b/src/java/org/apache/cassandra/cache/CachingOptions.java
deleted file mode 100644
index 686f365..0000000
--- a/src/java/org/apache/cassandra/cache/CachingOptions.java
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.cache;
-
-import java.util.*;
-
-import org.apache.commons.lang3.StringUtils;
-
-import org.apache.cassandra.exceptions.ConfigurationException;
-
-import static org.apache.cassandra.utils.FBUtilities.fromJsonMap;
-
-/*
- * CQL: { 'keys' : 'ALL|NONE', 'rows_per_partition': '200|NONE|ALL' }
- */
-public class CachingOptions
-{
-    public static final CachingOptions KEYS_ONLY = new CachingOptions(new KeyCache(KeyCache.Type.ALL), new RowCache(RowCache.Type.NONE));
-    public static final CachingOptions ALL = new CachingOptions(new KeyCache(KeyCache.Type.ALL), new RowCache(RowCache.Type.ALL));
-    public static final CachingOptions ROWS_ONLY = new CachingOptions(new KeyCache(KeyCache.Type.NONE), new RowCache(RowCache.Type.ALL));
-    public static final CachingOptions NONE = new CachingOptions(new KeyCache(KeyCache.Type.NONE), new RowCache(RowCache.Type.NONE));
-
-    public final KeyCache keyCache;
-    public final RowCache rowCache;
-    private static final Set<String> legacyOptions = new HashSet<>(Arrays.asList("ALL", "NONE", "KEYS_ONLY", "ROWS_ONLY"));
-
-    public CachingOptions(KeyCache kc, RowCache rc)
-    {
-        this.keyCache = kc;
-        this.rowCache = rc;
-    }
-
-    public static CachingOptions fromString(String cache) throws ConfigurationException
-    {
-        if (legacyOptions.contains(cache.toUpperCase()))
-            return fromLegacyOption(cache.toUpperCase());
-        return fromMap(fromJsonMap(cache));
-    }
-
-    public static CachingOptions fromMap(Map<String, String> cacheConfig) throws ConfigurationException
-    {
-        validateCacheConfig(cacheConfig);
-        if (!cacheConfig.containsKey("keys") && !cacheConfig.containsKey("rows_per_partition"))
-            return CachingOptions.NONE;
-        if (!cacheConfig.containsKey("keys"))
-            return new CachingOptions(new KeyCache(KeyCache.Type.NONE), RowCache.fromString(cacheConfig.get("rows_per_partition")));
-        if (!cacheConfig.containsKey("rows_per_partition"))
-            return CachingOptions.KEYS_ONLY;
-
-        return new CachingOptions(KeyCache.fromString(cacheConfig.get("keys")), RowCache.fromString(cacheConfig.get("rows_per_partition")));
-    }
-
-    public Map<String, String> asMap()
-    {
-        Map<String, String> map = new HashMap<>(2);
-        map.put("keys", keyCache.toString());
-        map.put("rows_per_partition", rowCache.toString());
-        return map;
-    }
-
-    private static void validateCacheConfig(Map<String, String> cacheConfig) throws ConfigurationException
-    {
-        for (Map.Entry<String, String> entry : cacheConfig.entrySet())
-        {
-            String value = entry.getValue().toUpperCase();
-            if (entry.getKey().equals("keys"))
-            {
-                if (!(value.equals("ALL") || value.equals("NONE")))
-                {
-                    throw new ConfigurationException("'keys' can only have values 'ALL' or 'NONE', but was '" + value + "'");
-                }
-            }
-            else if (entry.getKey().equals("rows_per_partition"))
-            {
-                if (!(value.equals("ALL") || value.equals("NONE") || StringUtils.isNumeric(value)))
-                {
-                    throw new ConfigurationException("'rows_per_partition' can only have values 'ALL', 'NONE' or be numeric, but was '" + value + "'.");
-                }
-            }
-            else
-                throw new ConfigurationException("Only supported CachingOptions parameters are 'keys' and 'rows_per_partition', but was '" + entry.getKey() + "'");
-        }
-    }
-
-    @Override
-    public String toString()
-    {
-        return String.format("{\"keys\":\"%s\", \"rows_per_partition\":\"%s\"}", keyCache.toString(), rowCache.toString());
-    }
-
-    private static CachingOptions fromLegacyOption(String cache)
-    {
-        if (cache.equals("ALL"))
-            return ALL;
-        if (cache.equals("KEYS_ONLY"))
-            return KEYS_ONLY;
-        if (cache.equals("ROWS_ONLY"))
-            return ROWS_ONLY;
-        return NONE;
-    }
-
-    @Override
-    public boolean equals(Object o)
-    {
-        if (this == o) return true;
-        if (o == null || getClass() != o.getClass()) return false;
-
-        CachingOptions o2 = (CachingOptions) o;
-
-        if (!keyCache.equals(o2.keyCache)) return false;
-        if (!rowCache.equals(o2.rowCache)) return false;
-
-        return true;
-    }
-
-    @Override
-    public int hashCode()
-    {
-        int result = keyCache.hashCode();
-        result = 31 * result + rowCache.hashCode();
-        return result;
-    }
-
-    // FIXME: move to ThriftConversion
-    public static CachingOptions fromThrift(String caching, String cellsPerRow) throws ConfigurationException
-    {
-
-        RowCache rc = new RowCache(RowCache.Type.NONE);
-        KeyCache kc = new KeyCache(KeyCache.Type.ALL);
-        // if we get a caching string from thrift it is legacy, "ALL", "KEYS_ONLY" etc, fromString handles those
-        if (caching != null)
-        {
-            CachingOptions givenOptions = CachingOptions.fromString(caching);
-            rc = givenOptions.rowCache;
-            kc = givenOptions.keyCache;
-        }
-        // if we get cells_per_row from thrift, it is either "ALL" or "<number of cells to cache>".
-        if (cellsPerRow != null && rc.isEnabled())
-            rc = RowCache.fromString(cellsPerRow);
-        return new CachingOptions(kc, rc);
-    }
-
-    // FIXME: move to ThriftConversion
-    public String toThriftCaching()
-    {
-        if (rowCache.isEnabled() && keyCache.isEnabled())
-            return "ALL";
-        if (rowCache.isEnabled())
-            return "ROWS_ONLY";
-        if (keyCache.isEnabled())
-            return "KEYS_ONLY";
-        return "NONE";
-    }
-
-    // FIXME: move to ThriftConversion
-    public String toThriftCellsPerRow()
-    {
-        if (rowCache.cacheFullPartitions())
-            return "ALL";
-        return String.valueOf(rowCache.rowsToCache);
-    }
-
-    public static class KeyCache
-    {
-        public final Type type;
-        public KeyCache(Type type)
-        {
-            this.type = type;
-        }
-
-        public enum Type
-        {
-            ALL, NONE
-        }
-        public static KeyCache fromString(String keyCache)
-        {
-            return new KeyCache(Type.valueOf(keyCache.toUpperCase()));
-        }
-
-        public boolean isEnabled()
-        {
-            return type == Type.ALL;
-        }
-
-        @Override
-        public boolean equals(Object o)
-        {
-            if (this == o) return true;
-            if (o == null || getClass() != o.getClass()) return false;
-
-            KeyCache keyCache = (KeyCache) o;
-
-            if (type != keyCache.type) return false;
-
-            return true;
-        }
-
-        @Override
-        public int hashCode()
-        {
-            return type.hashCode();
-        }
-        @Override
-        public String toString()
-        {
-            return type.toString();
-        }
-    }
-
-    public static class RowCache
-    {
-        public final Type type;
-        public final int rowsToCache;
-
-        public RowCache(Type type)
-        {
-            this(type, (type == Type.ALL) ? Integer.MAX_VALUE : 0);
-        }
-        public RowCache(Type type, int rowsToCache)
-        {
-            this.type = type;
-            this.rowsToCache = rowsToCache;
-        }
-
-        public enum Type
-        {
-            ALL, NONE, HEAD
-        }
-
-        public static RowCache fromString(String rowCache)
-        {
-            if (rowCache == null || rowCache.equalsIgnoreCase("none"))
-                return new RowCache(Type.NONE, 0);
-            else if (rowCache.equalsIgnoreCase("all"))
-                return new RowCache(Type.ALL, Integer.MAX_VALUE);
-            return new RowCache(Type.HEAD, Integer.parseInt(rowCache));
-        }
-        public boolean isEnabled()
-        {
-            return (type == Type.ALL) || (type == Type.HEAD);
-        }
-        public boolean cacheFullPartitions()
-        {
-            return type == Type.ALL;
-        }
-        @Override
-        public String toString()
-        {
-            if (type == Type.ALL) return "ALL";
-            if (type == Type.NONE) return "NONE";
-            return String.valueOf(rowsToCache);
-        }
-
-        @Override
-        public boolean equals(Object o)
-        {
-            if (this == o) return true;
-            if (o == null || getClass() != o.getClass()) return false;
-
-            RowCache rowCache = (RowCache) o;
-
-            if (rowsToCache != rowCache.rowsToCache) return false;
-            if (type != rowCache.type) return false;
-
-            return true;
-        }
-
-        @Override
-        public int hashCode()
-        {
-            int result = type.hashCode();
-            result = 31 * result + rowsToCache;
-            return result;
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/config/CFMetaData.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/config/CFMetaData.java b/src/java/org/apache/cassandra/config/CFMetaData.java
index 43c95ea..0982109 100644
--- a/src/java/org/apache/cassandra/config/CFMetaData.java
+++ b/src/java/org/apache/cassandra/config/CFMetaData.java
@@ -24,12 +24,12 @@ import java.nio.ByteBuffer;
 import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.MoreObjects;
 import com.google.common.base.Objects;
-import com.google.common.base.Strings;
 import com.google.common.collect.*;
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.commons.lang3.builder.HashCodeBuilder;
@@ -37,7 +37,6 @@ import org.apache.commons.lang3.builder.ToStringBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.cassandra.cache.CachingOptions;
 import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.cql3.statements.CFStatement;
@@ -48,13 +47,9 @@ import org.apache.cassandra.db.index.SecondaryIndex;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.exceptions.*;
-import org.apache.cassandra.io.compress.CompressionParameters;
-import org.apache.cassandra.io.compress.LZ4Compressor;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
-import org.apache.cassandra.schema.MaterializedViews;
-import org.apache.cassandra.schema.SchemaKeyspace;
-import org.apache.cassandra.schema.Triggers;
+import org.apache.cassandra.schema.*;
 import org.apache.cassandra.utils.*;
 import org.github.jamm.Unmetered;
 
@@ -66,113 +61,13 @@ public final class CFMetaData
 {
     public enum Flag
     {
-        SUPER, COUNTER, DENSE, COMPOUND, MATERIALIZEDVIEW
+        SUPER, COUNTER, DENSE, COMPOUND, VIEW
     }
 
     private static final Logger logger = LoggerFactory.getLogger(CFMetaData.class);
 
     public static final Serializer serializer = new Serializer();
 
-    public final static double DEFAULT_READ_REPAIR_CHANCE = 0.0;
-    public final static double DEFAULT_DCLOCAL_READ_REPAIR_CHANCE = 0.1;
-    public final static int DEFAULT_GC_GRACE_SECONDS = 864000;
-    public final static int DEFAULT_MIN_COMPACTION_THRESHOLD = 4;
-    public final static int DEFAULT_MAX_COMPACTION_THRESHOLD = 32;
-    public final static Class<? extends AbstractCompactionStrategy> DEFAULT_COMPACTION_STRATEGY_CLASS = SizeTieredCompactionStrategy.class;
-    public final static CachingOptions DEFAULT_CACHING_STRATEGY = CachingOptions.KEYS_ONLY;
-    public final static int DEFAULT_DEFAULT_TIME_TO_LIVE = 0;
-    public final static SpeculativeRetry DEFAULT_SPECULATIVE_RETRY = new SpeculativeRetry(SpeculativeRetry.RetryType.PERCENTILE, 0.99);
-    public final static int DEFAULT_MIN_INDEX_INTERVAL = 128;
-    public final static int DEFAULT_MAX_INDEX_INTERVAL = 2048;
-
-    // Note that this is the default only for user created tables
-    public final static String DEFAULT_COMPRESSOR = LZ4Compressor.class.getCanonicalName();
-
-    // Note that this need to come *before* any CFMetaData is defined so before the compile below.
-    private static final Comparator<ColumnDefinition> regularColumnComparator = new Comparator<ColumnDefinition>()
-    {
-        public int compare(ColumnDefinition def1, ColumnDefinition def2)
-        {
-            return ByteBufferUtil.compareUnsigned(def1.name.bytes, def2.name.bytes);
-        }
-    };
-
-    public static class SpeculativeRetry
-    {
-        public enum RetryType
-        {
-            NONE, CUSTOM, PERCENTILE, ALWAYS
-        }
-
-        public final RetryType type;
-        public final double value;
-
-        private SpeculativeRetry(RetryType type, double value)
-        {
-            this.type = type;
-            this.value = value;
-        }
-
-        public static SpeculativeRetry fromString(String retry) throws ConfigurationException
-        {
-            String name = retry.toUpperCase();
-            try
-            {
-                if (name.endsWith(RetryType.PERCENTILE.toString()))
-                {
-                    double value = Double.parseDouble(name.substring(0, name.length() - 10));
-                    if (value > 100 || value < 0)
-                        throw new ConfigurationException("PERCENTILE should be between 0 and 100, but was " + value);
-                    return new SpeculativeRetry(RetryType.PERCENTILE, (value / 100));
-                }
-                else if (name.endsWith("MS"))
-                {
-                    double value = Double.parseDouble(name.substring(0, name.length() - 2));
-                    return new SpeculativeRetry(RetryType.CUSTOM, value);
-                }
-                else
-                {
-                    return new SpeculativeRetry(RetryType.valueOf(name), 0);
-                }
-            }
-            catch (IllegalArgumentException e)
-            {
-                // ignore to throw the below exception.
-            }
-            throw new ConfigurationException("invalid speculative_retry type: " + retry);
-        }
-
-        @Override
-        public boolean equals(Object obj)
-        {
-            if (!(obj instanceof SpeculativeRetry))
-                return false;
-            SpeculativeRetry rhs = (SpeculativeRetry) obj;
-            return Objects.equal(type, rhs.type) && Objects.equal(value, rhs.value);
-        }
-
-        @Override
-        public int hashCode()
-        {
-            return Objects.hashCode(type, value);
-        }
-
-        @Override
-        public String toString()
-        {
-            switch (type)
-            {
-            case PERCENTILE:
-                // TODO switch to BigDecimal so round-tripping isn't lossy
-                return (value * 100) + "PERCENTILE";
-            case CUSTOM:
-                return value + "ms";
-            default:
-                return type.toString();
-            }
-        }
-    }
-
     //REQUIRED
     public final UUID cfId;                           // internal id, never exposed to user
     public final String ksName;                       // name of keyspace
@@ -192,21 +87,10 @@ public final class CFMetaData
 
     private final Serializers serializers;
 
-    //OPTIONAL
-    private volatile String comment = "";
-    private volatile double readRepairChance = DEFAULT_READ_REPAIR_CHANCE;
-    private volatile double dcLocalReadRepairChance = DEFAULT_DCLOCAL_READ_REPAIR_CHANCE;
-    private volatile int gcGraceSeconds = DEFAULT_GC_GRACE_SECONDS;
+    // non-final, for now
+    public volatile TableParams params = TableParams.DEFAULT;
+
     private volatile AbstractType<?> keyValidator = BytesType.instance;
-    private volatile int minCompactionThreshold = DEFAULT_MIN_COMPACTION_THRESHOLD;
-    private volatile int maxCompactionThreshold = DEFAULT_MAX_COMPACTION_THRESHOLD;
-    private volatile Double bloomFilterFpChance = null;
-    private volatile CachingOptions caching = DEFAULT_CACHING_STRATEGY;
-    private volatile int minIndexInterval = DEFAULT_MIN_INDEX_INTERVAL;
-    private volatile int maxIndexInterval = DEFAULT_MAX_INDEX_INTERVAL;
-    private volatile int memtableFlushPeriod = 0;
-    private volatile int defaultTimeToLive = DEFAULT_DEFAULT_TIME_TO_LIVE;
-    private volatile SpeculativeRetry speculativeRetry = DEFAULT_SPECULATIVE_RETRY;
     private volatile Map<ByteBuffer, DroppedColumn> droppedColumns = new HashMap<>();
     private volatile Triggers triggers = Triggers.none();
     private volatile MaterializedViews materializedViews = MaterializedViews.none();
@@ -228,31 +112,110 @@ public final class CFMetaData
     // for those tables in practice).
     private volatile ColumnDefinition compactValueColumn;
 
-    public volatile Class<? extends AbstractCompactionStrategy> compactionStrategyClass = DEFAULT_COMPACTION_STRATEGY_CLASS;
-    public volatile Map<String, String> compactionStrategyOptions = new HashMap<>();
-
-    public volatile CompressionParameters compressionParameters = CompressionParameters.noCompression();
-
-    // attribute setters that return the modified CFMetaData instance
-    public CFMetaData comment(String prop) {comment = Strings.nullToEmpty(prop); return this;}
-    public CFMetaData readRepairChance(double prop) {readRepairChance = prop; return this;}
-    public CFMetaData dcLocalReadRepairChance(double prop) {dcLocalReadRepairChance = prop; return this;}
-    public CFMetaData gcGraceSeconds(int prop) {gcGraceSeconds = prop; return this;}
-    public CFMetaData minCompactionThreshold(int prop) {minCompactionThreshold = prop; return this;}
-    public CFMetaData maxCompactionThreshold(int prop) {maxCompactionThreshold = prop; return this;}
-    public CFMetaData compactionStrategyClass(Class<? extends AbstractCompactionStrategy> prop) {compactionStrategyClass = prop; return this;}
-    public CFMetaData compactionStrategyOptions(Map<String, String> prop) {compactionStrategyOptions = prop; return this;}
-    public CFMetaData compressionParameters(CompressionParameters prop) {compressionParameters = prop; return this;}
-    public CFMetaData bloomFilterFpChance(double prop) {bloomFilterFpChance = prop; return this;}
-    public CFMetaData caching(CachingOptions prop) {caching = prop; return this;}
-    public CFMetaData minIndexInterval(int prop) {minIndexInterval = prop; return this;}
-    public CFMetaData maxIndexInterval(int prop) {maxIndexInterval = prop; return this;}
-    public CFMetaData memtableFlushPeriod(int prop) {memtableFlushPeriod = prop; return this;}
-    public CFMetaData defaultTimeToLive(int prop) {defaultTimeToLive = prop; return this;}
-    public CFMetaData speculativeRetry(SpeculativeRetry prop) {speculativeRetry = prop; return this;}
-    public CFMetaData droppedColumns(Map<ByteBuffer, DroppedColumn> cols) {droppedColumns = cols; return this;}
-    public CFMetaData triggers(Triggers prop) {triggers = prop; return this;}
-    public CFMetaData materializedViews(MaterializedViews prop) {materializedViews = prop; return this;}
+    /*
+     * All of these methods will go away once CFMetaData becomes completely immutable.
+     */
+    public CFMetaData params(TableParams params)
+    {
+        this.params = params;
+        return this;
+    }
+
+    public CFMetaData bloomFilterFpChance(double prop)
+    {
+        params = TableParams.builder(params).bloomFilterFpChance(prop).build();
+        return this;
+    }
+
+    public CFMetaData caching(CachingParams prop)
+    {
+        params = TableParams.builder(params).caching(prop).build();
+        return this;
+    }
+
+    public CFMetaData comment(String prop)
+    {
+        params = TableParams.builder(params).comment(prop).build();
+        return this;
+    }
+
+    public CFMetaData compaction(CompactionParams prop)
+    {
+        params = TableParams.builder(params).compaction(prop).build();
+        return this;
+    }
+
+    public CFMetaData compression(CompressionParams prop)
+    {
+        params = TableParams.builder(params).compression(prop).build();
+        return this;
+    }
+
+    public CFMetaData dcLocalReadRepairChance(double prop)
+    {
+        params = TableParams.builder(params).dcLocalReadRepairChance(prop).build();
+        return this;
+    }
+
+    public CFMetaData defaultTimeToLive(int prop)
+    {
+        params = TableParams.builder(params).defaultTimeToLive(prop).build();
+        return this;
+    }
+
+    public CFMetaData gcGraceSeconds(int prop)
+    {
+        params = TableParams.builder(params).gcGraceSeconds(prop).build();
+        return this;
+    }
+
+    public CFMetaData maxIndexInterval(int prop)
+    {
+        params = TableParams.builder(params).maxIndexInterval(prop).build();
+        return this;
+    }
+
+    public CFMetaData memtableFlushPeriod(int prop)
+    {
+        params = TableParams.builder(params).memtableFlushPeriodInMs(prop).build();
+        return this;
+    }
+
+    public CFMetaData minIndexInterval(int prop)
+    {
+        params = TableParams.builder(params).minIndexInterval(prop).build();
+        return this;
+    }
+
+    public CFMetaData readRepairChance(double prop)
+    {
+        params = TableParams.builder(params).readRepairChance(prop).build();
+        return this;
+    }
+
+    public CFMetaData speculativeRetry(SpeculativeRetryParam prop)
+    {
+        params = TableParams.builder(params).speculativeRetry(prop).build();
+        return this;
+    }
+
+    public CFMetaData droppedColumns(Map<ByteBuffer, DroppedColumn> cols)
+    {
+        droppedColumns = cols;
+        return this;
+    }
+
+    public CFMetaData triggers(Triggers prop)
+    {
+        triggers = prop;
+        return this;
+    }
+
+    public CFMetaData materializedViews(MaterializedViews prop)
+    {
+        materializedViews = prop;
+        return this;
+    }
 
     private CFMetaData(String keyspace,
                        String name,
@@ -287,7 +250,7 @@ public final class CFMetaData
         if (isCompound)
             flags.add(Flag.COMPOUND);
         if (isMaterializedView)
-            flags.add(Flag.MATERIALIZEDVIEW);
+            flags.add(Flag.VIEW);
         this.flags = Sets.immutableEnumSet(flags);
 
         isIndex = cfName.contains(".");
@@ -414,15 +377,15 @@ public final class CFMetaData
         CFStatement parsed = (CFStatement)QueryProcessor.parseStatement(cql);
         parsed.prepareKeyspace(keyspace);
         CreateTableStatement statement = (CreateTableStatement) parsed.prepare().statement;
-        CFMetaData.Builder builder = statement.metadataBuilder();
-        builder.withId(generateLegacyCfId(keyspace, statement.columnFamily()));
-        CFMetaData cfm = builder.build();
-        statement.applyPropertiesTo(cfm);
 
-        return cfm.readRepairChance(0)
-                  .dcLocalReadRepairChance(0)
-                  .gcGraceSeconds(0)
-                  .memtableFlushPeriod(3600 * 1000);
+        return statement.metadataBuilder()
+                        .withId(generateLegacyCfId(keyspace, statement.columnFamily()))
+                        .build()
+                        .params(statement.params())
+                        .readRepairChance(0.0)
+                        .dcLocalReadRepairChance(0.0)
+                        .gcGraceSeconds(0)
+                        .memtableFlushPeriod((int) TimeUnit.HOURS.toMillis(1));
     }
 
     /**
@@ -438,22 +401,20 @@ public final class CFMetaData
 
     public CFMetaData reloadIndexMetadataProperties(CFMetaData parent)
     {
+        TableParams.Builder indexParams = TableParams.builder(parent.params);
+
         // Depends on parent's cache setting, turn on its index CF's cache.
         // Row caching is never enabled; see CASSANDRA-5732
-        CachingOptions indexCaching = parent.getCaching().keyCache.isEnabled()
-                                    ? CachingOptions.KEYS_ONLY
-                                    : CachingOptions.NONE;
+        if (parent.params.caching.cacheKeys())
+            indexParams.caching(CachingParams.CACHE_KEYS);
+        else
+            indexParams.caching(CachingParams.CACHE_NOTHING);
 
-        return this.readRepairChance(0.0)
+        indexParams.readRepairChance(0.0)
                    .dcLocalReadRepairChance(0.0)
-                   .gcGraceSeconds(0)
-                   .caching(indexCaching)
-                   .speculativeRetry(parent.speculativeRetry)
-                   .minCompactionThreshold(parent.minCompactionThreshold)
-                   .maxCompactionThreshold(parent.maxCompactionThreshold)
-                   .compactionStrategyClass(parent.compactionStrategyClass)
-                   .compactionStrategyOptions(parent.compactionStrategyOptions)
-                   .compressionParameters(parent.compressionParameters);
+                   .gcGraceSeconds(0);
+
+        return params(indexParams.build());
     }
 
     public CFMetaData copy()
@@ -520,22 +481,7 @@ public final class CFMetaData
     @VisibleForTesting
     public static CFMetaData copyOpts(CFMetaData newCFMD, CFMetaData oldCFMD)
     {
-        return newCFMD.comment(oldCFMD.comment)
-                      .readRepairChance(oldCFMD.readRepairChance)
-                      .dcLocalReadRepairChance(oldCFMD.dcLocalReadRepairChance)
-                      .gcGraceSeconds(oldCFMD.gcGraceSeconds)
-                      .minCompactionThreshold(oldCFMD.minCompactionThreshold)
-                      .maxCompactionThreshold(oldCFMD.maxCompactionThreshold)
-                      .compactionStrategyClass(oldCFMD.compactionStrategyClass)
-                      .compactionStrategyOptions(new HashMap<>(oldCFMD.compactionStrategyOptions))
-                      .compressionParameters(oldCFMD.compressionParameters.copy())
-                      .bloomFilterFpChance(oldCFMD.getBloomFilterFpChance())
-                      .caching(oldCFMD.caching)
-                      .defaultTimeToLive(oldCFMD.defaultTimeToLive)
-                      .minIndexInterval(oldCFMD.minIndexInterval)
-                      .maxIndexInterval(oldCFMD.maxIndexInterval)
-                      .speculativeRetry(oldCFMD.speculativeRetry)
-                      .memtableFlushPeriod(oldCFMD.memtableFlushPeriod)
+        return newCFMD.params(oldCFMD.params)
                       .droppedColumns(new HashMap<>(oldCFMD.droppedColumns))
                       .triggers(oldCFMD.triggers)
                       .materializedViews(oldCFMD.materializedViews);
@@ -555,11 +501,6 @@ public final class CFMetaData
         return cfName + Directories.SECONDARY_INDEX_NAME_SEPARATOR + (info.getIndexName() == null ? ByteBufferUtil.bytesToHex(info.name.bytes) : info.getIndexName());
     }
 
-    public String getComment()
-    {
-        return comment;
-    }
-
     /**
      * The '.' char is the only way to identify if the CFMetadata is for a secondary index
      */
@@ -595,23 +536,13 @@ public final class CFMetaData
         return isIndex ? cfName.substring(0, cfName.indexOf('.')) : null;
     }
 
-    public double getReadRepairChance()
-    {
-        return readRepairChance;
-    }
-
-    public double getDcLocalReadRepairChance()
-    {
-        return dcLocalReadRepairChance;
-    }
-
     public ReadRepairDecision newReadRepairDecision()
     {
         double chance = ThreadLocalRandom.current().nextDouble();
-        if (getReadRepairChance() > chance)
+        if (params.readRepairChance > chance)
             return ReadRepairDecision.GLOBAL;
 
-        if (getDcLocalReadRepairChance() > chance)
+        if (params.dcLocalReadRepairChance > chance)
             return ReadRepairDecision.DC_LOCAL;
 
         return ReadRepairDecision.NONE;
@@ -624,31 +555,11 @@ public final class CFMetaData
              : UTF8Type.instance;
     }
 
-    public int getGcGraceSeconds()
-    {
-        return gcGraceSeconds;
-    }
-
     public AbstractType<?> getKeyValidator()
     {
         return keyValidator;
     }
 
-    public int getMinCompactionThreshold()
-    {
-        return minCompactionThreshold;
-    }
-
-    public int getMaxCompactionThreshold()
-    {
-        return maxCompactionThreshold;
-    }
-
-    public CompressionParameters compressionParameters()
-    {
-        return compressionParameters;
-    }
-
     public Collection<ColumnDefinition> allColumns()
     {
         return columnMetadata.values();
@@ -728,44 +639,6 @@ public final class CFMetaData
         return CompositeType.build(values);
     }
 
-    public double getBloomFilterFpChance()
-    {
-        // we disallow bFFPC==null starting in 1.2.1 but tolerated it before that
-        return (bloomFilterFpChance == null || bloomFilterFpChance == 0)
-               ? compactionStrategyClass == LeveledCompactionStrategy.class ? 0.1 : 0.01
-               : bloomFilterFpChance;
-    }
-
-    public CachingOptions getCaching()
-    {
-        return caching;
-    }
-
-    public int getMinIndexInterval()
-    {
-        return minIndexInterval;
-    }
-
-    public int getMaxIndexInterval()
-    {
-        return maxIndexInterval;
-    }
-
-    public SpeculativeRetry getSpeculativeRetry()
-    {
-        return speculativeRetry;
-    }
-
-    public int getMemtableFlushPeriod()
-    {
-        return memtableFlushPeriod;
-    }
-
-    public int getDefaultTimeToLive()
-    {
-        return defaultTimeToLive;
-    }
-
     public Map<ByteBuffer, DroppedColumn> getDroppedColumns()
     {
         return droppedColumns;
@@ -803,25 +676,10 @@ public final class CFMetaData
             && Objects.equal(flags, other.flags)
             && Objects.equal(ksName, other.ksName)
             && Objects.equal(cfName, other.cfName)
+            && Objects.equal(params, other.params)
             && Objects.equal(comparator, other.comparator)
-            && Objects.equal(comment, other.comment)
-            && Objects.equal(readRepairChance, other.readRepairChance)
-            && Objects.equal(dcLocalReadRepairChance, other.dcLocalReadRepairChance)
-            && Objects.equal(gcGraceSeconds, other.gcGraceSeconds)
             && Objects.equal(keyValidator, other.keyValidator)
-            && Objects.equal(minCompactionThreshold, other.minCompactionThreshold)
-            && Objects.equal(maxCompactionThreshold, other.maxCompactionThreshold)
             && Objects.equal(columnMetadata, other.columnMetadata)
-            && Objects.equal(compactionStrategyClass, other.compactionStrategyClass)
-            && Objects.equal(compactionStrategyOptions, other.compactionStrategyOptions)
-            && Objects.equal(compressionParameters, other.compressionParameters)
-            && Objects.equal(getBloomFilterFpChance(), other.getBloomFilterFpChance())
-            && Objects.equal(memtableFlushPeriod, other.memtableFlushPeriod)
-            && Objects.equal(caching, other.caching)
-            && Objects.equal(defaultTimeToLive, other.defaultTimeToLive)
-            && Objects.equal(minIndexInterval, other.minIndexInterval)
-            && Objects.equal(maxIndexInterval, other.maxIndexInterval)
-            && Objects.equal(speculativeRetry, other.speculativeRetry)
             && Objects.equal(droppedColumns, other.droppedColumns)
             && Objects.equal(triggers, other.triggers)
             && Objects.equal(materializedViews, other.materializedViews);
@@ -836,24 +694,9 @@ public final class CFMetaData
             .append(cfName)
             .append(flags)
             .append(comparator)
-            .append(comment)
-            .append(readRepairChance)
-            .append(dcLocalReadRepairChance)
-            .append(gcGraceSeconds)
+            .append(params)
             .append(keyValidator)
-            .append(minCompactionThreshold)
-            .append(maxCompactionThreshold)
             .append(columnMetadata)
-            .append(compactionStrategyClass)
-            .append(compactionStrategyOptions)
-            .append(compressionParameters)
-            .append(getBloomFilterFpChance())
-            .append(memtableFlushPeriod)
-            .append(caching)
-            .append(defaultTimeToLive)
-            .append(minIndexInterval)
-            .append(maxIndexInterval)
-            .append(speculativeRetry)
             .append(droppedColumns)
             .append(triggers)
             .append(materializedViews)
@@ -893,30 +736,13 @@ public final class CFMetaData
         // compaction thresholds are checked by ThriftValidation. We shouldn't be doing
         // validation on the apply path; it's too late for that.
 
-        comment = Strings.nullToEmpty(cfm.comment);
-        readRepairChance = cfm.readRepairChance;
-        dcLocalReadRepairChance = cfm.dcLocalReadRepairChance;
-        gcGraceSeconds = cfm.gcGraceSeconds;
-        keyValidator = cfm.keyValidator;
-        minCompactionThreshold = cfm.minCompactionThreshold;
-        maxCompactionThreshold = cfm.maxCompactionThreshold;
+        params = cfm.params;
 
-        bloomFilterFpChance = cfm.getBloomFilterFpChance();
-        caching = cfm.caching;
-        minIndexInterval = cfm.minIndexInterval;
-        maxIndexInterval = cfm.maxIndexInterval;
-        memtableFlushPeriod = cfm.memtableFlushPeriod;
-        defaultTimeToLive = cfm.defaultTimeToLive;
-        speculativeRetry = cfm.speculativeRetry;
+        keyValidator = cfm.keyValidator;
 
         if (!cfm.droppedColumns.isEmpty())
             droppedColumns = cfm.droppedColumns;
 
-        compactionStrategyClass = cfm.compactionStrategyClass;
-        compactionStrategyOptions = cfm.compactionStrategyOptions;
-
-        compressionParameters = cfm.compressionParameters;
-
         triggers = cfm.triggers;
         materializedViews = cfm.materializedViews;
 
@@ -945,37 +771,6 @@ public final class CFMetaData
             throw new ConfigurationException(String.format("Column family comparators do not match or are not compatible (found %s; expected %s).", cfm.comparator.getClass().getSimpleName(), comparator.getClass().getSimpleName()));
     }
 
-    public static void validateCompactionOptions(Class<? extends AbstractCompactionStrategy> strategyClass, Map<String, String> options) throws ConfigurationException
-    {
-        try
-        {
-            if (options == null)
-                return;
-
-            Map<?,?> unknownOptions = (Map) strategyClass.getMethod("validateOptions", Map.class).invoke(null, options);
-            if (!unknownOptions.isEmpty())
-                throw new ConfigurationException(String.format("Properties specified %s are not understood by %s", unknownOptions.keySet(), strategyClass.getSimpleName()));
-        }
-        catch (NoSuchMethodException e)
-        {
-            logger.warn("Compaction Strategy {} does not have a static validateOptions method. Validation ignored", strategyClass.getName());
-        }
-        catch (InvocationTargetException e)
-        {
-            if (e.getTargetException() instanceof ConfigurationException)
-                throw (ConfigurationException) e.getTargetException();
-            throw new ConfigurationException("Failed to validate compaction options: " + options);
-        }
-        catch (ConfigurationException e)
-        {
-            throw e;
-        }
-        catch (Exception e)
-        {
-            throw new ConfigurationException("Failed to validate compaction options: " + options);
-        }
-    }
-
     public static Class<? extends AbstractCompactionStrategy> createCompactionStrategy(String className) throws ConfigurationException
     {
         className = className.contains(".") ? className : "org.apache.cassandra.db.compaction." + className;
@@ -991,8 +786,8 @@ public final class CFMetaData
         try
         {
             Constructor<? extends AbstractCompactionStrategy> constructor =
-                compactionStrategyClass.getConstructor(ColumnFamilyStore.class, Map.class);
-            return constructor.newInstance(cfs, compactionStrategyOptions);
+                params.compaction.klass().getConstructor(ColumnFamilyStore.class, Map.class);
+            return constructor.newInstance(cfs, params.compaction.options());
         }
         catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException e)
         {
@@ -1094,6 +889,8 @@ public final class CFMetaData
         if (!isNameValid(cfName))
             throw new ConfigurationException(String.format("ColumnFamily name must not be empty, more than %s characters long, or contain non-alphanumeric-underscore characters (got \"%s\")", Schema.NAME_LENGTH, cfName));
 
+        params.validate();
+
         for (int i = 0; i < comparator.size(); i++)
         {
             if (comparator.subtype(i) instanceof CounterColumnType)
@@ -1147,13 +944,6 @@ public final class CFMetaData
             }
         }
 
-        validateCompactionThresholds();
-
-        if (bloomFilterFpChance != null && bloomFilterFpChance == 0)
-            throw new ConfigurationException("Zero false positives is impossible; bloom filter false positive chance bffpc must be 0 < bffpc <= 1");
-
-        validateIndexIntervalThresholds();
-
         return this;
     }
 
@@ -1167,32 +957,6 @@ public final class CFMetaData
         return indexNames;
     }
 
-    private void validateCompactionThresholds() throws ConfigurationException
-    {
-        if (maxCompactionThreshold == 0)
-        {
-            logger.warn("Disabling compaction by setting max or min compaction has been deprecated, " +
-                    "set the compaction strategy option 'enabled' to 'false' instead");
-            return;
-        }
-
-        if (minCompactionThreshold <= 1)
-            throw new ConfigurationException(String.format("Min compaction threshold cannot be less than 2 (got %d).", minCompactionThreshold));
-
-        if (minCompactionThreshold > maxCompactionThreshold)
-            throw new ConfigurationException(String.format("Min compaction threshold (got %d) cannot be greater than max compaction threshold (got %d)",
-                                                            minCompactionThreshold, maxCompactionThreshold));
-    }
-
-    private void validateIndexIntervalThresholds() throws ConfigurationException
-    {
-        if (minIndexInterval <= 0)
-            throw new ConfigurationException(String.format("Min index interval must be greater than 0 (got %d).", minIndexInterval));
-        if (maxIndexInterval < minIndexInterval)
-            throw new ConfigurationException(String.format("Max index interval (%d) must be greater than the min index " +
-                                                           "interval (%d).", maxIndexInterval, minIndexInterval));
-    }
-
     // The comparator to validate the definition name with thrift.
     public AbstractType<?> thriftColumnNameType()
     {
@@ -1207,13 +971,6 @@ public final class CFMetaData
         return clusteringColumns.get(0).type;
     }
 
-    public CFMetaData addAllColumnDefinitions(Collection<ColumnDefinition> defs)
-    {
-        for (ColumnDefinition def : defs)
-            addOrReplaceColumnDefinition(def);
-        return this;
-    }
-
     public CFMetaData addColumnDefinition(ColumnDefinition def) throws ConfigurationException
     {
         if (columnMetadata.containsKey(def.name.bytes))
@@ -1415,28 +1172,13 @@ public final class CFMetaData
             .append("ksName", ksName)
             .append("cfName", cfName)
             .append("flags", flags)
+            .append("params", params)
             .append("comparator", comparator)
             .append("partitionColumns", partitionColumns)
             .append("partitionKeyColumns", partitionKeyColumns)
             .append("clusteringColumns", clusteringColumns)
-            .append("comment", comment)
-            .append("readRepairChance", readRepairChance)
-            .append("dcLocalReadRepairChance", dcLocalReadRepairChance)
-            .append("gcGraceSeconds", gcGraceSeconds)
             .append("keyValidator", keyValidator)
-            .append("minCompactionThreshold", minCompactionThreshold)
-            .append("maxCompactionThreshold", maxCompactionThreshold)
             .append("columnMetadata", columnMetadata.values())
-            .append("compactionStrategyClass", compactionStrategyClass)
-            .append("compactionStrategyOptions", compactionStrategyOptions)
-            .append("compressionParameters", compressionParameters.asMap())
-            .append("bloomFilterFpChance", getBloomFilterFpChance())
-            .append("memtableFlushPeriod", memtableFlushPeriod)
-            .append("caching", caching)
-            .append("defaultTimeToLive", defaultTimeToLive)
-            .append("minIndexInterval", minIndexInterval)
-            .append("maxIndexInterval", maxIndexInterval)
-            .append("speculativeRetry", speculativeRetry)
             .append("droppedColumns", droppedColumns)
             .append("triggers", triggers)
             .append("materializedViews", materializedViews)

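For readers following the refactoring: the per-option fields and getters removed from CFMetaData above are now reached through the single params object. A minimal sketch of the new access pattern (not part of this patch; it only uses members visible in the hunks above, such as the public params field, params.defaultTimeToLive and params.compaction.klass()):

    // Sketch only: reading table options through CFMetaData.params after this change.
    import org.apache.cassandra.config.CFMetaData;
    import org.apache.cassandra.schema.TableParams;

    public final class ParamsAccessSketch
    {
        public static String describe(CFMetaData cfm)
        {
            TableParams params = cfm.params;               // single holder for all table options
            return String.format("%s: default_time_to_live=%d, compaction=%s",
                                 cfm.cfName,
                                 params.defaultTimeToLive, // was cfm.getDefaultTimeToLive()
                                 params.compaction.klass().getSimpleName());
        }
    }
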
http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/cql3/Cql.g
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/Cql.g b/src/java/org/apache/cassandra/cql3/Cql.g
index 0eadaee..a34eebb 100644
--- a/src/java/org/apache/cassandra/cql3/Cql.g
+++ b/src/java/org/apache/cassandra/cql3/Cql.g
@@ -807,7 +807,7 @@ alterKeyspaceStatement returns [AlterKeyspaceStatement expr]
 alterTableStatement returns [AlterTableStatement expr]
     @init {
         AlterTableStatement.Type type = null;
-        CFPropDefs props = new CFPropDefs();
+        TableAttributes attrs = new TableAttributes();
         Map<ColumnIdentifier.Raw, ColumnIdentifier.Raw> renames = new HashMap<ColumnIdentifier.Raw, ColumnIdentifier.Raw>();
         boolean isStatic = false;
     }
@@ -815,24 +815,24 @@ alterTableStatement returns [AlterTableStatement expr]
           ( K_ALTER id=cident K_TYPE v=comparatorType { type = AlterTableStatement.Type.ALTER; }
           | K_ADD   id=cident v=comparatorType ({ isStatic=true; } K_STATIC)? { type = AlterTableStatement.Type.ADD; }
           | K_DROP  id=cident                         { type = AlterTableStatement.Type.DROP; }
-          | K_WITH  properties[props]                 { type = AlterTableStatement.Type.OPTS; }
+          | K_WITH  properties[attrs]                 { type = AlterTableStatement.Type.OPTS; }
           | K_RENAME                                  { type = AlterTableStatement.Type.RENAME; }
                id1=cident K_TO toId1=cident { renames.put(id1, toId1); }
                ( K_AND idn=cident K_TO toIdn=cident { renames.put(idn, toIdn); } )*
           )
     {
-        $expr = new AlterTableStatement(cf, type, id, v, props, renames, isStatic);
+        $expr = new AlterTableStatement(cf, type, id, v, attrs, renames, isStatic);
     }
     ;
 
 alterMaterializedViewStatement returns [AlterMaterializedViewStatement expr]
     @init {
-        CFPropDefs props = new CFPropDefs();
+        TableAttributes attrs = new TableAttributes();
     }
     : K_ALTER K_MATERIALIZED K_VIEW name=columnFamilyName
-          K_WITH properties[props]
+          K_WITH properties[attrs]
     {
-        $expr = new AlterMaterializedViewStatement(name, props);
+        $expr = new AlterMaterializedViewStatement(name, attrs);
     }
     ;
     

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/cql3/statements/AlterMaterializedViewStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/AlterMaterializedViewStatement.java b/src/java/org/apache/cassandra/cql3/statements/AlterMaterializedViewStatement.java
index d2b1d13..d0116fb 100644
--- a/src/java/org/apache/cassandra/cql3/statements/AlterMaterializedViewStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/AlterMaterializedViewStatement.java
@@ -15,12 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.cassandra.cql3.statements;
 
 import org.apache.cassandra.auth.Permission;
 import org.apache.cassandra.config.CFMetaData;
-import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.cql3.CFName;
 import org.apache.cassandra.exceptions.InvalidRequestException;
 import org.apache.cassandra.exceptions.RequestValidationException;
@@ -33,13 +31,12 @@ import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
 
 public class AlterMaterializedViewStatement extends SchemaAlteringStatement
 {
-    private final CFPropDefs cfProps;
+    private final TableAttributes attrs;
 
-    public AlterMaterializedViewStatement(CFName name,
-                                          CFPropDefs cfProps)
+    public AlterMaterializedViewStatement(CFName name, TableAttributes attrs)
     {
         super(name);
-        this.cfProps = cfProps;
+        this.attrs = attrs;
     }
 
     public void checkAccess(ClientState state) throws UnauthorizedException, InvalidRequestException
@@ -60,13 +57,12 @@ public class AlterMaterializedViewStatement extends SchemaAlteringStatement
 
         CFMetaData cfm = meta.copy();
 
-
-        if (cfProps == null)
+        if (attrs == null)
             throw new InvalidRequestException("ALTER MATERIALIZED VIEW WITH invoked, but no parameters found");
 
-        cfProps.validate();
+        attrs.validate();
+        cfm.params(attrs.asAlteredTableParams(cfm.params));
 
-        cfProps.applyToCFMetadata(cfm);
         MigrationManager.announceColumnFamilyUpdate(cfm, false, isLocalOnly);
         return true;
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/cql3/statements/AlterTableStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/AlterTableStatement.java b/src/java/org/apache/cassandra/cql3/statements/AlterTableStatement.java
index 1495f2d..b7e09d9 100644
--- a/src/java/org/apache/cassandra/cql3/statements/AlterTableStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/AlterTableStatement.java
@@ -29,6 +29,7 @@ import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.cql3.*;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.exceptions.*;
+import org.apache.cassandra.schema.TableParams;
 import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.service.MigrationManager;
 import org.apache.cassandra.transport.Event;
@@ -37,7 +38,7 @@ import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
 
 public class AlterTableStatement extends SchemaAlteringStatement
 {
-    public static enum Type
+    public enum Type
     {
         ADD, ALTER, DROP, OPTS, RENAME
     }
@@ -45,7 +46,7 @@ public class AlterTableStatement extends SchemaAlteringStatement
     public final Type oType;
     public final CQL3Type.Raw validator;
     public final ColumnIdentifier.Raw rawColumnName;
-    private final CFPropDefs cfProps;
+    private final TableAttributes attrs;
     private final Map<ColumnIdentifier.Raw, ColumnIdentifier.Raw> renames;
     private final boolean isStatic; // Only for ALTER ADD
 
@@ -53,7 +54,7 @@ public class AlterTableStatement extends SchemaAlteringStatement
                                Type type,
                                ColumnIdentifier.Raw columnName,
                                CQL3Type.Raw validator,
-                               CFPropDefs cfProps,
+                               TableAttributes attrs,
                                Map<ColumnIdentifier.Raw, ColumnIdentifier.Raw> renames,
                                boolean isStatic)
     {
@@ -61,7 +62,7 @@ public class AlterTableStatement extends SchemaAlteringStatement
         this.oType = type;
         this.rawColumnName = columnName;
         this.validator = validator; // used only for ADD/ALTER commands
-        this.cfProps = cfProps;
+        this.attrs = attrs;
         this.renames = renames;
         this.isStatic = isStatic;
     }
@@ -284,15 +285,17 @@ public class AlterTableStatement extends SchemaAlteringStatement
                                                                     builder.toString()));
                 break;
             case OPTS:
-                if (cfProps == null)
+                if (attrs == null)
                     throw new InvalidRequestException("ALTER TABLE WITH invoked, but no parameters found");
+                attrs.validate();
 
-                cfProps.validate();
+                TableParams params = attrs.asAlteredTableParams(cfm.params);
 
-                if (meta.isCounter() && cfProps.getDefaultTimeToLive() > 0)
+                if (meta.isCounter() && params.defaultTimeToLive > 0)
                     throw new InvalidRequestException("Cannot set default_time_to_live on a table with counters");
 
-                cfProps.applyToCFMetadata(cfm);
+                cfm.params(params);
+
                 break;
             case RENAME:
                 for (Map.Entry<ColumnIdentifier.Raw, ColumnIdentifier.Raw> entry : renames.entrySet())

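The OPTS branch now funnels the WITH options through TableAttributes.asAlteredTableParams(), which starts from the table's current params, so only the options actually named in the statement are overridden. A minimal sketch of that flow (not part of the patch; class and method names outside the hunk above are illustrative):

    // Sketch only: the ALTER ... WITH flow after this change. Options present in the
    // attributes replace values in cfm.params; everything else is carried over.
    import org.apache.cassandra.config.CFMetaData;
    import org.apache.cassandra.cql3.statements.TableAttributes;
    import org.apache.cassandra.schema.TableParams;

    public final class AlterOptsSketch
    {
        public static void applyWithClause(CFMetaData cfm, TableAttributes attrs)
        {
            attrs.validate();                                             // keyword check plus TableParams validation
            TableParams altered = attrs.asAlteredTableParams(cfm.params); // overlay on current params
            cfm.params(altered);                                          // replaces cfProps.applyToCFMetadata(cfm)
        }
    }
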
http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/cql3/statements/CFPropDefs.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/CFPropDefs.java b/src/java/org/apache/cassandra/cql3/statements/CFPropDefs.java
deleted file mode 100644
index 56db85a..0000000
--- a/src/java/org/apache/cassandra/cql3/statements/CFPropDefs.java
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.cql3.statements;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.cassandra.cache.CachingOptions;
-import org.apache.cassandra.config.CFMetaData;
-import org.apache.cassandra.config.CFMetaData.SpeculativeRetry;
-import org.apache.cassandra.db.compaction.AbstractCompactionStrategy;
-import org.apache.cassandra.exceptions.ConfigurationException;
-import org.apache.cassandra.exceptions.SyntaxException;
-import org.apache.cassandra.io.compress.CompressionParameters;
-
-public class CFPropDefs extends PropertyDefinitions
-{
-    public static final String KW_COMMENT = "comment";
-    public static final String KW_READREPAIRCHANCE = "read_repair_chance";
-    public static final String KW_DCLOCALREADREPAIRCHANCE = "dclocal_read_repair_chance";
-    public static final String KW_GCGRACESECONDS = "gc_grace_seconds";
-    public static final String KW_MINCOMPACTIONTHRESHOLD = "min_threshold";
-    public static final String KW_MAXCOMPACTIONTHRESHOLD = "max_threshold";
-    public static final String KW_CACHING = "caching";
-    public static final String KW_DEFAULT_TIME_TO_LIVE = "default_time_to_live";
-    public static final String KW_MIN_INDEX_INTERVAL = "min_index_interval";
-    public static final String KW_MAX_INDEX_INTERVAL = "max_index_interval";
-    public static final String KW_SPECULATIVE_RETRY = "speculative_retry";
-    public static final String KW_BF_FP_CHANCE = "bloom_filter_fp_chance";
-    public static final String KW_MEMTABLE_FLUSH_PERIOD = "memtable_flush_period_in_ms";
-
-    public static final String KW_COMPACTION = "compaction";
-    public static final String KW_COMPRESSION = "compression";
-
-    public static final String COMPACTION_STRATEGY_CLASS_KEY = "class";
-
-    public static final Set<String> keywords = new HashSet<>();
-    public static final Set<String> obsoleteKeywords = new HashSet<>();
-
-    static
-    {
-        keywords.add(KW_COMMENT);
-        keywords.add(KW_READREPAIRCHANCE);
-        keywords.add(KW_DCLOCALREADREPAIRCHANCE);
-        keywords.add(KW_GCGRACESECONDS);
-        keywords.add(KW_CACHING);
-        keywords.add(KW_DEFAULT_TIME_TO_LIVE);
-        keywords.add(KW_MIN_INDEX_INTERVAL);
-        keywords.add(KW_MAX_INDEX_INTERVAL);
-        keywords.add(KW_SPECULATIVE_RETRY);
-        keywords.add(KW_BF_FP_CHANCE);
-        keywords.add(KW_COMPACTION);
-        keywords.add(KW_COMPRESSION);
-        keywords.add(KW_MEMTABLE_FLUSH_PERIOD);
-
-        obsoleteKeywords.add("index_interval");
-        obsoleteKeywords.add("replicate_on_write");
-        obsoleteKeywords.add("populate_io_cache_on_flush");
-    }
-
-    private Class<? extends AbstractCompactionStrategy> compactionStrategyClass = null;
-
-    public void validate() throws ConfigurationException, SyntaxException
-    {
-        // Skip validation if the compaction strategy class is already set as it means we've already
-        // prepared (and redoing it would set strategyClass back to null, which we don't want)
-        if (compactionStrategyClass != null)
-            return;
-
-        validate(keywords, obsoleteKeywords);
-
-        Map<String, String> compactionOptions = getCompactionOptions();
-        if (!compactionOptions.isEmpty())
-        {
-            String strategy = compactionOptions.get(COMPACTION_STRATEGY_CLASS_KEY);
-            if (strategy == null)
-                throw new ConfigurationException("Missing sub-option '" + COMPACTION_STRATEGY_CLASS_KEY + "' for the '" + KW_COMPACTION + "' option.");
-
-            compactionStrategyClass = CFMetaData.createCompactionStrategy(strategy);
-            compactionOptions.remove(COMPACTION_STRATEGY_CLASS_KEY);
-
-            CFMetaData.validateCompactionOptions(compactionStrategyClass, compactionOptions);
-        }
-
-        Map<String, String> compressionOptions = getCompressionOptions();
-        if (!compressionOptions.isEmpty())
-        {
-            if (CompressionParameters.isEnabled(compressionOptions)
-                && !CompressionParameters.containsSstableCompressionClass(compressionOptions))
-            {
-                throw new ConfigurationException("Missing sub-option '" + CompressionParameters.CLASS + "' for the '" + KW_COMPRESSION + "' option.");
-            }
-
-            CompressionParameters compressionParameters = CompressionParameters.fromMap(compressionOptions);
-            compressionParameters.validate();
-        }
-
-        validateMinimumInt(KW_DEFAULT_TIME_TO_LIVE, 0, CFMetaData.DEFAULT_DEFAULT_TIME_TO_LIVE);
-
-        Integer minIndexInterval = getInt(KW_MIN_INDEX_INTERVAL, null);
-        Integer maxIndexInterval = getInt(KW_MAX_INDEX_INTERVAL, null);
-        if (minIndexInterval != null && minIndexInterval < 1)
-            throw new ConfigurationException(KW_MIN_INDEX_INTERVAL + " must be greater than 0, but was " + minIndexInterval);
-        if (maxIndexInterval != null && minIndexInterval != null && maxIndexInterval < minIndexInterval)
-            throw new ConfigurationException(KW_MAX_INDEX_INTERVAL + " must be greater than " + KW_MIN_INDEX_INTERVAL + ", but was " + maxIndexInterval);
-
-        SpeculativeRetry.fromString(getString(KW_SPECULATIVE_RETRY, SpeculativeRetry.RetryType.NONE.name()));
-    }
-
-    public Class<? extends AbstractCompactionStrategy> getCompactionStrategy()
-    {
-        return compactionStrategyClass;
-    }
-
-    public Map<String, String> getCompactionOptions() throws SyntaxException
-    {
-        Map<String, String> compactionOptions = getMap(KW_COMPACTION);
-        if (compactionOptions == null)
-            return Collections.emptyMap();
-        return compactionOptions;
-    }
-
-    public Map<String, String> getCompressionOptions() throws SyntaxException
-    {
-        Map<String, String> compressionOptions = getMap(KW_COMPRESSION);
-        if (compressionOptions == null)
-            return Collections.emptyMap();
-        return compressionOptions;
-    }
-    public CachingOptions getCachingOptions() throws SyntaxException, ConfigurationException
-    {
-        CachingOptions options = null;
-        Object val = properties.get(KW_CACHING);
-        if (val == null)
-            return null;
-        else if (val instanceof Map)
-            options = CachingOptions.fromMap(getMap(KW_CACHING));
-        else if (val instanceof String) // legacy syntax
-        {
-            options = CachingOptions.fromString(getSimple(KW_CACHING));
-            logger.warn("Setting caching options with deprecated syntax. {}", val);
-        }
-        return options;
-    }
-
-    public Integer getDefaultTimeToLive() throws SyntaxException
-    {
-        return getInt(KW_DEFAULT_TIME_TO_LIVE, 0);
-    }
-
-    public void applyToCFMetadata(CFMetaData cfm) throws ConfigurationException, SyntaxException
-    {
-        if (hasProperty(KW_COMMENT))
-            cfm.comment(getString(KW_COMMENT, ""));
-
-        cfm.readRepairChance(getDouble(KW_READREPAIRCHANCE, cfm.getReadRepairChance()));
-        cfm.dcLocalReadRepairChance(getDouble(KW_DCLOCALREADREPAIRCHANCE, cfm.getDcLocalReadRepairChance()));
-        cfm.gcGraceSeconds(getInt(KW_GCGRACESECONDS, cfm.getGcGraceSeconds()));
-        int minCompactionThreshold = toInt(KW_MINCOMPACTIONTHRESHOLD, getCompactionOptions().get(KW_MINCOMPACTIONTHRESHOLD), cfm.getMinCompactionThreshold());
-        int maxCompactionThreshold = toInt(KW_MAXCOMPACTIONTHRESHOLD, getCompactionOptions().get(KW_MAXCOMPACTIONTHRESHOLD), cfm.getMaxCompactionThreshold());
-        if (minCompactionThreshold <= 0 || maxCompactionThreshold <= 0)
-            throw new ConfigurationException("Disabling compaction by setting compaction thresholds to 0 has been deprecated, set the compaction option 'enabled' to false instead.");
-        cfm.minCompactionThreshold(minCompactionThreshold);
-        cfm.maxCompactionThreshold(maxCompactionThreshold);
-        cfm.defaultTimeToLive(getInt(KW_DEFAULT_TIME_TO_LIVE, cfm.getDefaultTimeToLive()));
-        cfm.speculativeRetry(CFMetaData.SpeculativeRetry.fromString(getString(KW_SPECULATIVE_RETRY, cfm.getSpeculativeRetry().toString())));
-        cfm.memtableFlushPeriod(getInt(KW_MEMTABLE_FLUSH_PERIOD, cfm.getMemtableFlushPeriod()));
-        cfm.minIndexInterval(getInt(KW_MIN_INDEX_INTERVAL, cfm.getMinIndexInterval()));
-        cfm.maxIndexInterval(getInt(KW_MAX_INDEX_INTERVAL, cfm.getMaxIndexInterval()));
-
-        if (compactionStrategyClass != null)
-        {
-            cfm.compactionStrategyClass(compactionStrategyClass);
-            cfm.compactionStrategyOptions(new HashMap<>(getCompactionOptions()));
-        }
-
-        cfm.bloomFilterFpChance(getDouble(KW_BF_FP_CHANCE, cfm.getBloomFilterFpChance()));
-
-        if (!getCompressionOptions().isEmpty())
-        {
-            CompressionParameters compressionParameters = CompressionParameters.fromMap(getCompressionOptions());
-            compressionParameters.validate();
-            cfm.compressionParameters(compressionParameters);
-        }
-        CachingOptions cachingOptions = getCachingOptions();
-        if (cachingOptions != null)
-            cfm.caching(cachingOptions);
-    }
-
-    @Override
-    public String toString()
-    {
-        return String.format("CFPropDefs(%s)", properties);
-    }
-
-    private void validateMinimumInt(String field, int minimumValue, int defaultValue) throws SyntaxException, ConfigurationException
-    {
-        Integer val = getInt(field, null);
-        if (val != null && val < minimumValue)
-            throw new ConfigurationException(String.format("%s cannot be smaller than %d, (default %d), but was %d",
-                                                            field, minimumValue, defaultValue, val));
-
-    }
-}

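With CFPropDefs removed, the validation and option application it performed (together with the compaction-threshold and bloom-filter checks dropped from CFMetaData earlier in this patch) are assumed to live in TableParams and the per-option params types introduced here. A rough sketch of building params directly through the new builder (not part of the patch; the option values are made up):

    // Sketch only: TableParams built directly, then validated. TableParams.validate()
    // is assumed to cover the checks formerly done by CFPropDefs and CFMetaData.validate().
    import org.apache.cassandra.schema.TableParams;

    public final class ParamsBuilderSketch
    {
        public static TableParams example()
        {
            TableParams.Builder builder = TableParams.builder();
            builder.comment("plain builder example");
            builder.gcGraceSeconds(864000);
            builder.bloomFilterFpChance(0.01);
            builder.minIndexInterval(128);
            builder.maxIndexInterval(2048);
            TableParams params = builder.build();
            params.validate();   // assumed to reject e.g. a zero bloom filter fp chance or min > max index interval
            return params;
        }
    }
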
http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/cql3/statements/CFProperties.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/CFProperties.java b/src/java/org/apache/cassandra/cql3/statements/CFProperties.java
index 50ec360..92dd994 100644
--- a/src/java/org/apache/cassandra/cql3/statements/CFProperties.java
+++ b/src/java/org/apache/cassandra/cql3/statements/CFProperties.java
@@ -15,10 +15,8 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.cassandra.cql3.statements;
 
-import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.Map;
 
@@ -28,7 +26,7 @@ import org.apache.cassandra.db.marshal.ReversedType;
 
 public class CFProperties
 {
-    public final CFPropDefs properties = new CFPropDefs();
+    public final TableAttributes properties = new TableAttributes();
     final Map<ColumnIdentifier, Boolean> definedOrdering = new LinkedHashMap<>(); // Insertion ordering is important
     boolean useCompactStorage = false;
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/cql3/statements/CreateTableStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/CreateTableStatement.java b/src/java/org/apache/cassandra/cql3/statements/CreateTableStatement.java
index 6e28f8c..aa58fb4 100644
--- a/src/java/org/apache/cassandra/cql3/statements/CreateTableStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/CreateTableStatement.java
@@ -30,54 +30,40 @@ import org.apache.cassandra.cql3.*;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.exceptions.*;
-import org.apache.cassandra.io.compress.CompressionParameters;
+import org.apache.cassandra.schema.TableParams;
 import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.service.MigrationManager;
 import org.apache.cassandra.service.QueryState;
 import org.apache.cassandra.transport.Event;
-import org.apache.cassandra.utils.ByteBufferUtil;
 
-/** A <code>CREATE TABLE</code> parsed from a CQL query statement. */
+/** A {@code CREATE TABLE} parsed from a CQL query statement. */
 public class CreateTableStatement extends SchemaAlteringStatement
 {
     private List<AbstractType<?>> keyTypes;
     private List<AbstractType<?>> clusteringTypes;
 
-    private Map<ByteBuffer, CollectionType> collections = new HashMap<>();
+    private final Map<ByteBuffer, CollectionType> collections = new HashMap<>();
 
     private final List<ColumnIdentifier> keyAliases = new ArrayList<>();
     private final List<ColumnIdentifier> columnAliases = new ArrayList<>();
-    private ByteBuffer valueAlias;
 
     private boolean isDense;
     private boolean isCompound;
     private boolean hasCounters;
 
     // use a TreeMap to preserve ordering across JDK versions (see CASSANDRA-9492)
-    private final Map<ColumnIdentifier, AbstractType> columns = new TreeMap<>(new Comparator<ColumnIdentifier>()
-    {
-        public int compare(ColumnIdentifier o1, ColumnIdentifier o2)
-        {
-            return o1.bytes.compareTo(o2.bytes);
-        }
-    });
+    private final Map<ColumnIdentifier, AbstractType> columns = new TreeMap<>((o1, o2) -> o1.bytes.compareTo(o2.bytes));
+
     private final Set<ColumnIdentifier> staticColumns;
-    private final CFPropDefs properties;
+    private final TableParams params;
     private final boolean ifNotExists;
 
-    public CreateTableStatement(CFName name, CFPropDefs properties, boolean ifNotExists, Set<ColumnIdentifier> staticColumns)
+    public CreateTableStatement(CFName name, TableParams params, boolean ifNotExists, Set<ColumnIdentifier> staticColumns)
     {
         super(name);
-        this.properties = properties;
+        this.params = params;
         this.ifNotExists = ifNotExists;
         this.staticColumns = staticColumns;
-
-        if (!this.properties.hasProperty(CFPropDefs.KW_COMPRESSION) && CFMetaData.DEFAULT_COMPRESSOR != null)
-            this.properties.addProperty(CFPropDefs.KW_COMPRESSION,
-                                        new HashMap<String, String>()
-                                        {{
-                                            put(CompressionParameters.CLASS, CFMetaData.DEFAULT_COMPRESSOR);
-                                        }});
     }
 
     public void checkAccess(ClientState state) throws UnauthorizedException, InvalidRequestException
@@ -168,21 +154,19 @@ public class CreateTableStatement extends SchemaAlteringStatement
 
     /**
      * Returns a CFMetaData instance based on the parameters parsed from this
-     * <code>CREATE</code> statement, or defaults where applicable.
+     * {@code CREATE} statement, or defaults where applicable.
      *
      * @return a CFMetaData instance corresponding to the values parsed from this statement
      * @throws InvalidRequestException on failure to validate parsed parameters
      */
-    public CFMetaData getCFMetaData() throws RequestValidationException
+    public CFMetaData getCFMetaData()
     {
-        CFMetaData newCFMD = metadataBuilder().build();
-        applyPropertiesTo(newCFMD);
-        return newCFMD;
+        return metadataBuilder().build().params(params);
     }
 
-    public void applyPropertiesTo(CFMetaData cfmd) throws RequestValidationException
+    public TableParams params()
     {
-        properties.applyToCFMetadata(cfmd);
+        return params;
     }
 
     public static class RawStatement extends CFStatement
@@ -190,9 +174,9 @@ public class CreateTableStatement extends SchemaAlteringStatement
         private final Map<ColumnIdentifier, CQL3Type.Raw> definitions = new HashMap<>();
         public final CFProperties properties = new CFProperties();
 
-        private final List<List<ColumnIdentifier>> keyAliases = new ArrayList<List<ColumnIdentifier>>();
-        private final List<ColumnIdentifier> columnAliases = new ArrayList<ColumnIdentifier>();
-        private final Set<ColumnIdentifier> staticColumns = new HashSet<ColumnIdentifier>();
+        private final List<List<ColumnIdentifier>> keyAliases = new ArrayList<>();
+        private final List<ColumnIdentifier> columnAliases = new ArrayList<>();
+        private final Set<ColumnIdentifier> staticColumns = new HashSet<>();
 
         private final Multiset<ColumnIdentifier> definedNames = HashMultiset.create(1);
 
@@ -221,7 +205,9 @@ public class CreateTableStatement extends SchemaAlteringStatement
 
             properties.validate();
 
-            CreateTableStatement stmt = new CreateTableStatement(cfName, properties.properties, ifNotExists, staticColumns);
+            TableParams params = properties.properties.asNewTableParams();
+
+            CreateTableStatement stmt = new CreateTableStatement(cfName, params, ifNotExists, staticColumns);
 
             for (Map.Entry<ColumnIdentifier, CQL3Type.Raw> entry : definitions.entrySet())
             {
@@ -238,11 +224,11 @@ public class CreateTableStatement extends SchemaAlteringStatement
                 throw new InvalidRequestException("No PRIMARY KEY specifed (exactly one required)");
             if (keyAliases.size() > 1)
                 throw new InvalidRequestException("Multiple PRIMARY KEYs specifed (exactly one required)");
-            if (stmt.hasCounters && properties.properties.getDefaultTimeToLive() > 0)
+            if (stmt.hasCounters && params.defaultTimeToLive > 0)
                 throw new InvalidRequestException("Cannot set default_time_to_live on a table with counters");
 
             List<ColumnIdentifier> kAliases = keyAliases.get(0);
-            stmt.keyTypes = new ArrayList<AbstractType<?>>(kAliases.size());
+            stmt.keyTypes = new ArrayList<>(kAliases.size());
             for (ColumnIdentifier alias : kAliases)
             {
                 stmt.keyAliases.add(alias);

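At CREATE time the statement now receives a ready-made TableParams from TableAttributes.asNewTableParams(), built from a fresh builder, so any option not given in the WITH clause is assumed to fall back to the TableParams default (the constructor above no longer seeds a default compression option itself). A small sketch of the prepare-time shape shown in the hunk (not part of the patch):

    // Sketch only: mirrors RawStatement.prepare() above. Assumes unset options come
    // back from asNewTableParams() with TableParams defaults.
    import org.apache.cassandra.cql3.statements.TableAttributes;
    import org.apache.cassandra.exceptions.InvalidRequestException;
    import org.apache.cassandra.schema.TableParams;

    public final class CreateParamsSketch
    {
        public static TableParams prepare(TableAttributes attrs, boolean hasCounters) throws InvalidRequestException
        {
            attrs.validate();
            TableParams params = attrs.asNewTableParams();
            if (hasCounters && params.defaultTimeToLive > 0)   // same guard as in prepare() above
                throw new InvalidRequestException("Cannot set default_time_to_live on a table with counters");
            return params;
        }
    }
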
http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/cql3/statements/KeyspaceAttributes.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/KeyspaceAttributes.java b/src/java/org/apache/cassandra/cql3/statements/KeyspaceAttributes.java
index d931530..db6b0d6 100644
--- a/src/java/org/apache/cassandra/cql3/statements/KeyspaceAttributes.java
+++ b/src/java/org/apache/cassandra/cql3/statements/KeyspaceAttributes.java
@@ -21,34 +21,42 @@ import java.util.*;
 
 import com.google.common.collect.ImmutableSet;
 
-import org.apache.cassandra.exceptions.*;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.schema.KeyspaceParams.Option;
-import org.apache.cassandra.schema.KeyspaceParams.Replication;
+import org.apache.cassandra.schema.ReplicationParams;
 
-public class KeyspaceAttributes extends PropertyDefinitions
+public final class KeyspaceAttributes extends PropertyDefinitions
 {
-    private static final Set<String> keywords = ImmutableSet.of(Option.DURABLE_WRITES.toString(), Option.REPLICATION.toString());
-    private static final Set<String> obsoleteKeywords = ImmutableSet.of();
+    private static final Set<String> validKeywords;
+    private static final Set<String> obsoleteKeywords;
 
-    public void validate() throws SyntaxException
+    static
     {
-        validate(keywords, obsoleteKeywords);
+        ImmutableSet.Builder<String> validBuilder = ImmutableSet.builder();
+        for (Option option : Option.values())
+            validBuilder.add(option.toString());
+        validKeywords = validBuilder.build();
+        obsoleteKeywords = ImmutableSet.of();
+    }
+
+    public void validate()
+    {
+        validate(validKeywords, obsoleteKeywords);
     }
 
     public String getReplicationStrategyClass()
     {
-        return getAllReplicationOptions().get(Replication.CLASS);
+        return getAllReplicationOptions().get(ReplicationParams.CLASS);
     }
 
-    public Map<String, String> getReplicationOptions() throws SyntaxException
+    public Map<String, String> getReplicationOptions()
     {
         Map<String, String> replication = new HashMap<>(getAllReplicationOptions());
-        replication.remove(Replication.CLASS);
+        replication.remove(ReplicationParams.CLASS);
         return replication;
     }
 
-    public Map<String, String> getAllReplicationOptions() throws SyntaxException
+    public Map<String, String> getAllReplicationOptions()
     {
         Map<String, String> replication = getMap(Option.REPLICATION.toString());
         return replication == null
@@ -65,9 +73,9 @@ public class KeyspaceAttributes extends PropertyDefinitions
     public KeyspaceParams asAlteredKeyspaceParams(KeyspaceParams previous)
     {
         boolean durableWrites = getBoolean(Option.DURABLE_WRITES.toString(), previous.durableWrites);
-        Replication replication = getReplicationStrategyClass() == null
-                                ? previous.replication
-                                : Replication.fromMap(getAllReplicationOptions());
+        ReplicationParams replication = getReplicationStrategyClass() == null
+                                      ? previous.replication
+                                      : ReplicationParams.fromMap(getAllReplicationOptions());
         return new KeyspaceParams(durableWrites, replication);
     }
 }

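KeyspaceAttributes follows the same overlay pattern with the renamed ReplicationParams: options missing from the statement keep their previous values. A minimal sketch of the equivalent of asAlteredKeyspaceParams() when only replication changes (not part of the patch; the helper name is illustrative):

    // Sketch only: overlay semantics for keyspace params, using only the members
    // shown in the hunk above (durableWrites, replication, ReplicationParams.fromMap).
    import java.util.Map;

    import org.apache.cassandra.schema.KeyspaceParams;
    import org.apache.cassandra.schema.ReplicationParams;

    public final class KeyspaceOverlaySketch
    {
        public static KeyspaceParams alterReplication(KeyspaceParams previous, Map<String, String> replicationOptions)
        {
            ReplicationParams replication = replicationOptions == null
                                          ? previous.replication
                                          : ReplicationParams.fromMap(replicationOptions);
            return new KeyspaceParams(previous.durableWrites, replication);
        }
    }
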
http://git-wip-us.apache.org/repos/asf/cassandra/blob/b31845c4/src/java/org/apache/cassandra/cql3/statements/TableAttributes.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/TableAttributes.java b/src/java/org/apache/cassandra/cql3/statements/TableAttributes.java
new file mode 100644
index 0000000..ed64f0d
--- /dev/null
+++ b/src/java/org/apache/cassandra/cql3/statements/TableAttributes.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.cql3.statements;
+
+import java.util.Map;
+import java.util.Set;
+
+import com.google.common.collect.ImmutableSet;
+
+import org.apache.cassandra.exceptions.SyntaxException;
+import org.apache.cassandra.schema.*;
+import org.apache.cassandra.schema.TableParams.Option;
+
+import static java.lang.String.format;
+
+public final class TableAttributes extends PropertyDefinitions
+{
+    private static final Set<String> validKeywords;
+    private static final Set<String> obsoleteKeywords;
+
+    static
+    {
+        ImmutableSet.Builder<String> validBuilder = ImmutableSet.builder();
+        for (Option option : Option.values())
+            validBuilder.add(option.toString());
+        validKeywords = validBuilder.build();
+        obsoleteKeywords = ImmutableSet.of();
+    }
+
+    public void validate()
+    {
+        validate(validKeywords, obsoleteKeywords);
+        build(TableParams.builder()).validate();
+    }
+
+    public TableParams asNewTableParams()
+    {
+        return build(TableParams.builder());
+    }
+
+    public TableParams asAlteredTableParams(TableParams previous)
+    {
+        return build(TableParams.builder(previous));
+    }
+
+    private TableParams build(TableParams.Builder builder)
+    {
+        if (hasOption(Option.BLOOM_FILTER_FP_CHANCE))
+            builder.bloomFilterFpChance(getDouble(Option.BLOOM_FILTER_FP_CHANCE));
+
+        if (hasOption(Option.CACHING))
+            builder.caching(CachingParams.fromMap(getMap(Option.CACHING)));
+
+        if (hasOption(Option.COMMENT))
+            builder.comment(getString(Option.COMMENT));
+
+        if (hasOption(Option.COMPACTION))
+            builder.compaction(CompactionParams.fromMap(getMap(Option.COMPACTION)));
+
+        if (hasOption(Option.COMPRESSION))
+            builder.compression(CompressionParams.fromMap(getMap(Option.COMPRESSION)));
+
+        if (hasOption(Option.DCLOCAL_READ_REPAIR_CHANCE))
+            builder.dcLocalReadRepairChance(getDouble(Option.DCLOCAL_READ_REPAIR_CHANCE));
+
+        if (hasOption(Option.DEFAULT_TIME_TO_LIVE))
+            builder.defaultTimeToLive(getInt(Option.DEFAULT_TIME_TO_LIVE));
+
+        if (hasOption(Option.GC_GRACE_SECONDS))
+            builder.gcGraceSeconds(getInt(Option.GC_GRACE_SECONDS));
+
+        if (hasOption(Option.MAX_INDEX_INTERVAL))
+            builder.maxIndexInterval(getInt(Option.MAX_INDEX_INTERVAL));
+
+        if (hasOption(Option.MEMTABLE_FLUSH_PERIOD_IN_MS))
+            builder.memtableFlushPeriodInMs(getInt(Option.MEMTABLE_FLUSH_PERIOD_IN_MS));
+
+        if (hasOption(Option.MIN_INDEX_INTERVAL))
+            builder.minIndexInterval(getInt(Option.MIN_INDEX_INTERVAL));
+
+        if (hasOption(Option.READ_REPAIR_CHANCE))
+            builder.readRepairChance(getDouble(Option.READ_REPAIR_CHANCE));
+
+        if (hasOption(Option.SPECULATIVE_RETRY))
+            builder.speculativeRetry(SpeculativeRetryParam.fromString(getString(Option.SPECULATIVE_RETRY)));
+
+        return builder.build();
+    }
+
+    private double getDouble(Option option)
+    {
+        String value = getString(option);
+
+        try
+        {
+            return Double.parseDouble(value);
+        }
+        catch (NumberFormatException e)
+        {
+            throw new SyntaxException(format("Invalid double value %s for '%s'", value, option));
+        }
+    }
+
+    private int getInt(Option option)
+    {
+        String value = getString(option);
+
+        try
+        {
+            return Integer.parseInt(value);
+        }
+        catch (NumberFormatException e)
+        {
+            throw new SyntaxException(String.format("Invalid integer value %s for '%s'", value, option));
+        }
+    }
+
+    private String getString(Option option)
+    {
+        String value = getSimple(option.toString());
+        if (value == null)
+            throw new IllegalStateException(format("Option '%s' is absent", option));
+        return value;
+    }
+
+    private Map<String, String> getMap(Option option)
+    {
+        Map<String, String> value = getMap(option.toString());
+        if (value == null)
+            throw new IllegalStateException(format("Option '%s' is absent", option));
+        return value;
+    }
+
+    private boolean hasOption(Option option)
+    {
+        return hasProperty(option.toString());
+    }
+}
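
The new TableAttributes is fed by the CQL parser through PropertyDefinitions, the same addProperty path the removed CFPropDefs used, and its keyword set is derived directly from TableParams.Option, so there is no separate keyword list to keep in sync. A short usage sketch (not part of the patch; the option values are made up, and addProperty(String, String) is assumed to be the parser-facing setter inherited from the superclass):

    // Sketch only: populating TableAttributes by hand the way the parser would,
    // then producing TableParams. addProperty(...) is inherited from PropertyDefinitions.
    import org.apache.cassandra.cql3.statements.TableAttributes;
    import org.apache.cassandra.exceptions.SyntaxException;
    import org.apache.cassandra.schema.TableParams;
    import org.apache.cassandra.schema.TableParams.Option;

    public final class TableAttributesSketch
    {
        public static TableParams fromWithClause() throws SyntaxException
        {
            TableAttributes attrs = new TableAttributes();
            attrs.addProperty(Option.COMMENT.toString(), "sketch table");
            attrs.addProperty(Option.GC_GRACE_SECONDS.toString(), "864000");
            attrs.validate();                   // unknown keywords rejected, then TableParams.validate()
            return attrs.asNewTableParams();    // unspecified options keep TableParams defaults
        }
    }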