Posted to commits@cassandra.apache.org by ad...@apache.org on 2019/02/19 15:20:19 UTC

[cassandra] branch trunk updated (47d4971 -> 094689a)

This is an automated email from the ASF dual-hosted git repository.

adelapena pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra.git.


    from 47d4971  Fix SimpleStrategy option validation
     new e6a61be  Add flag to disable SASI indexes, and warning on creation
     new 094689a  Merge branch 'cassandra-3.11' into trunk

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 CHANGES.txt                                        |  3 +-
 NEWS.txt                                           | 17 +++++++-
 conf/cassandra.yaml                                | 23 ++++++----
 src/java/org/apache/cassandra/config/Config.java   |  4 +-
 .../cassandra/config/DatabaseDescriptor.java       | 17 +++++++-
 .../statements/schema/CreateIndexStatement.java    | 15 +++++++
 .../statements/schema/CreateViewStatement.java     |  6 +--
 src/java/org/apache/cassandra/db/view/View.java    |  2 +
 .../org/apache/cassandra/index/sasi/SASIIndex.java |  2 +
 test/conf/cassandra-murmur.yaml                    |  2 +
 test/conf/cassandra.yaml                           |  2 +
 test/unit/org/apache/cassandra/cql3/ViewTest.java  | 51 ++++++++++++++++++++++
 .../apache/cassandra/index/sasi/SASICQLTest.java   | 49 +++++++++++++++++++++
 13 files changed, 178 insertions(+), 15 deletions(-)
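The change is a per-node feature flag plus a client warning: CREATE CUSTOM INDEX is rejected outright when 'enable_sasi_indexes' is false, and a usage warning is attached to the response when a SASI index is created while the flag is on. The snippet below is only a standalone sketch of that pattern for readers who don't want to scan the full diff; FeatureFlagSketch and its members are hypothetical names, not Cassandra classes, and the real logic lives in Config, DatabaseDescriptor and CreateIndexStatement (see the commit email below).

    import java.util.Collections;
    import java.util.Set;

    // Standalone sketch of the flag-plus-warning pattern from CASSANDRA-14866.
    // Hypothetical class; see the CreateIndexStatement diff below for the real code.
    final class FeatureFlagSketch
    {
        // mirrors Config.enable_sasi_indexes, which this commit defaults to false on trunk
        static volatile boolean enableSasiIndexes = false;

        static final String USAGE_WARNING =
            "SASI indexes are experimental and are not recommended for production use.";

        // returns the client warnings produced by a successful creation
        static Set<String> createSasiIndex(String cql)
        {
            if (!enableSasiIndexes)
                throw new IllegalStateException("SASI indexes are disabled. Enable in cassandra.yaml to use.");
            // ... the actual schema change would be applied here ...
            return Collections.singleton(USAGE_WARNING);
        }

        public static void main(String[] args)
        {
            String cql = "CREATE CUSTOM INDEX ON ks.t (v) USING 'org.apache.cassandra.index.sasi.SASIIndex'";
            try
            {
                createSasiIndex(cql);     // rejected: the flag is off by default
            }
            catch (IllegalStateException e)
            {
                System.out.println("rejected: " + e.getMessage());
            }
            enableSasiIndexes = true;     // what DatabaseDescriptor.setEnableSASIIndexes(true) toggles
            System.out.println("created with warnings: " + createSasiIndex(cql));
        }
    }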


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org


[cassandra] 01/01: Merge branch 'cassandra-3.11' into trunk

Posted by ad...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

adelapena pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra.git

commit 094689acf10bf133637c53aeebd3e8157832a279
Merge: 47d4971 e6a61be
Author: Andrés de la Peña <a....@gmail.com>
AuthorDate: Tue Feb 19 15:08:10 2019 +0000

    Merge branch 'cassandra-3.11' into trunk
    
    # Conflicts:
    #	CHANGES.txt
    #	conf/cassandra.yaml
    #	src/java/org/apache/cassandra/config/Config.java
    #	src/java/org/apache/cassandra/config/DatabaseDescriptor.java
    #	src/java/org/apache/cassandra/cql3/statements/CreateIndexStatement.java
    #	src/java/org/apache/cassandra/cql3/statements/CreateViewStatement.java
    #	src/java/org/apache/cassandra/db/view/View.java
    #	test/unit/org/apache/cassandra/cql3/ViewTest.java
    #	test/unit/org/apache/cassandra/index/sasi/SASICQLTest.java

 CHANGES.txt                                        |  3 +-
 NEWS.txt                                           | 17 +++++++-
 conf/cassandra.yaml                                | 23 ++++++----
 src/java/org/apache/cassandra/config/Config.java   |  4 +-
 .../cassandra/config/DatabaseDescriptor.java       | 17 +++++++-
 .../statements/schema/CreateIndexStatement.java    | 15 +++++++
 .../statements/schema/CreateViewStatement.java     |  6 +--
 src/java/org/apache/cassandra/db/view/View.java    |  2 +
 .../org/apache/cassandra/index/sasi/SASIIndex.java |  2 +
 test/conf/cassandra-murmur.yaml                    |  2 +
 test/conf/cassandra.yaml                           |  2 +
 test/unit/org/apache/cassandra/cql3/ViewTest.java  | 51 ++++++++++++++++++++++
 .../apache/cassandra/index/sasi/SASICQLTest.java   | 49 +++++++++++++++++++++
 13 files changed, 178 insertions(+), 15 deletions(-)

diff --cc CHANGES.txt
index 09d1733,f520aed..02233dd
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,354 -1,5 +1,355 @@@
 +4.0
 + * Fix SimpleStrategy option validation (CASSANDRA-15007)
 + * Don't try to cancel 2i compactions when starting anticompaction (CASSANDRA-15024)
 + * Avoid NPE in RepairRunnable.recordFailure (CASSANDRA-15025)
 + * SSL Cert Hot Reloading should check for sanity of the new keystore/truststore before loading it (CASSANDRA-14991)
 + * Avoid leaking threads when failing anticompactions and rate limit anticompactions (CASSANDRA-15002)
 + * Validate token() arguments early instead of throwing NPE at execution (CASSANDRA-14989)
 + * Add a new tool to dump audit logs (CASSANDRA-14885)
 + * Fix generating javadoc with Java11 (CASSANDRA-14988)
 + * Only cancel conflicting compactions when starting anticompactions and sub range compactions (CASSANDRA-14935)
 + * Use a stub IndexRegistry for non-daemon use cases (CASSANDRA-14938)
 + * Don't enable client transports when bootstrap is pending (CASSANDRA-14525)
 + * Make antiCompactGroup throw exception on error and anticompaction non cancellable
 +   again (CASSANDRA-14936)
 + * Catch empty/invalid bounds in SelectStatement (CASSANDRA-14849)
 + * Auto-expand replication_factor for NetworkTopologyStrategy (CASSANDRA-14303)
 + * Transient Replication: support EACH_QUORUM (CASSANDRA-14727)
 + * BufferPool: allocating thread for new chunks should acquire directly (CASSANDRA-14832)
 + * Send correct messaging version in internode messaging handshake's third message (CASSANDRA-14896)
 + * Make Read and Write Latency columns consistent for proxyhistograms and tablehistograms (CASSANDRA-11939)
 + * Make protocol checksum type option case insensitive (CASSANDRA-14716)
 + * Forbid re-adding static columns as regular and vice versa (CASSANDRA-14913)
 + * Audit log allows system keyspaces to be audited via configuration options (CASSANDRA-14498)
 + * Lower default chunk_length_in_kb from 64kb to 16kb (CASSANDRA-13241)
 + * Startup checker should wait for count rather than percentage (CASSANDRA-14297)
 + * Fix incorrect sorting of replicas in SimpleStrategy.calculateNaturalReplicas (CASSANDRA-14862)
 + * Partitioned outbound internode TCP connections can occur when nodes restart (CASSANDRA-14358)
 + * Don't write to system_distributed.repair_history, system_traces.sessions, system_traces.events in mixed version 3.X/4.0 clusters (CASSANDRA-14841)
 + * Avoid running query to self through messaging service (CASSANDRA-14807)
 + * Allow using custom script for chronicle queue BinLog archival (CASSANDRA-14373)
 + * Transient->Full range movements mishandle consistency level upgrade (CASSANDRA-14759)
 + * ReplicaCollection follow-up (CASSANDRA-14726)
 + * Transient node receives full data requests (CASSANDRA-14762)
 + * Enable snapshot artifacts publish (CASSANDRA-12704)
 + * Introduce RangesAtEndpoint.unwrap to simplify StreamSession.addTransferRanges (CASSANDRA-14770)
 + * LOCAL_QUORUM may speculate to non-local nodes, resulting in Timeout instead of Unavailable (CASSANDRA-14735)
 + * Avoid creating empty compaction tasks after truncate (CASSANDRA-14780)
 + * Fail incremental repair prepare phase if it encounters sstables from un-finalized sessions (CASSANDRA-14763)
 + * Add a check for receiving digest response from transient node (CASSANDRA-14750)
 + * Fail query on transient replica if coordinator only expects full data (CASSANDRA-14704)
 + * Remove mentions of transient replication from repair path (CASSANDRA-14698)
 + * Fix handleRepairStatusChangedNotification to remove first then add (CASSANDRA-14720)
 + * Allow transient node to serve as a repair coordinator (CASSANDRA-14693)
 + * DecayingEstimatedHistogramReservoir.EstimatedHistogramReservoirSnapshot returns wrong value for size() and incorrectly calculates count (CASSANDRA-14696)
 + * AbstractReplicaCollection equals and hash code should throw due to conflict between order sensitive/insensitive uses (CASSANDRA-14700)
 + * Detect inconsistencies in repaired data on the read path (CASSANDRA-14145)
 + * Add checksumming to the native protocol (CASSANDRA-13304)
 + * Make AuthCache more easily extendable (CASSANDRA-14662)
 + * Extend RolesCache to include detailed role info (CASSANDRA-14497)
 + * Add fqltool compare (CASSANDRA-14619)
 + * Add fqltool replay (CASSANDRA-14618)
 + * Log keyspace in full query log (CASSANDRA-14656)
 + * Transient Replication and Cheap Quorums (CASSANDRA-14404)
 + * Log server-generated timestamp and nowInSeconds used by queries in FQL (CASSANDRA-14675)
 + * Add diagnostic events for read repairs (CASSANDRA-14668)
 + * Use consistent nowInSeconds and timestamps values within a request (CASSANDRA-14671)
 + * Add sampler for query time and expose with nodetool (CASSANDRA-14436)
 + * Clean up Message.Request implementations (CASSANDRA-14677)
 + * Disable old native protocol versions on demand (CASSANDRA-14659)
 + * Allow specifying now-in-seconds in native protocol (CASSANDRA-14664)
 + * Improve BTree build performance by avoiding data copy (CASSANDRA-9989)
 + * Make monotonic read / read repair configurable (CASSANDRA-14635)
 + * Refactor CompactionStrategyManager (CASSANDRA-14621)
 + * Flush netty client messages immediately by default (CASSANDRA-13651)
 + * Improve read repair blocking behavior (CASSANDRA-10726)
 + * Add a virtual table to expose settings (CASSANDRA-14573)
 + * Fix up chunk cache handling of metrics (CASSANDRA-14628)
 + * Extend IAuthenticator to accept peer SSL certificates (CASSANDRA-14652)
 + * Incomplete handling of exceptions when decoding incoming messages (CASSANDRA-14574)
 + * Add diagnostic events for user audit logging (CASSANDRA-13668)
 + * Allow retrieving diagnostic events via JMX (CASSANDRA-14435)
 + * Add base classes for diagnostic events (CASSANDRA-13457)
 + * Clear view system metadata when dropping keyspace (CASSANDRA-14646)
 + * Allocate ReentrantLock on-demand in java11 AtomicBTreePartitionerBase (CASSANDRA-14637)
 + * Make all existing virtual tables use LocalPartitioner (CASSANDRA-14640)
 + * Revert 4.0 GC alg back to CMS (CASSANDRA-14636)
 + * Remove hardcoded java11 jvm args in idea workspace files (CASSANDRA-14627)
 + * Update netty to 4.1.128 (CASSANDRA-14633)
 + * Add a virtual table to expose thread pools (CASSANDRA-14523)
 + * Add a virtual table to expose caches (CASSANDRA-14538, CASSANDRA-14626)
 + * Fix toDate function for timestamp arguments (CASSANDRA-14502)
 + * Revert running dtests by default in circleci (CASSANDRA-14614)
 + * Stream entire SSTables when possible (CASSANDRA-14556)
 + * Cell reconciliation should not depend on nowInSec (CASSANDRA-14592)
 + * Add experimental support for Java 11 (CASSANDRA-9608)
 + * Make PeriodicCommitLogService.blockWhenSyncLagsNanos configurable (CASSANDRA-14580)
 + * Improve logging in MessageInHandler's constructor (CASSANDRA-14576)
 + * Set broadcast address in internode messaging handshake (CASSANDRA-14579)
 + * Wait for schema agreement prior to building MVs (CASSANDRA-14571)
 + * Make all DDL statements idempotent and not dependent on global state (CASSANDRA-13426)
 + * Bump the hints messaging version to match the current one (CASSANDRA-14536)
 + * OffsetAwareConfigurationLoader doesn't set ssl storage port causing bind errors in CircleCI (CASSANDRA-14546)
 + * Report why native_transport_port fails to bind (CASSANDRA-14544)
 + * Optimize internode messaging protocol (CASSANDRA-14485)
 + * Internode messaging handshake sends wrong messaging version number (CASSANDRA-14540)
 + * Add a virtual table to expose active client connections (CASSANDRA-14458)
 + * Clean up and refactor client metrics (CASSANDRA-14524)
 + * Nodetool import row cache invalidation races with adding sstables to tracker (CASSANDRA-14529)
 + * Fix assertions in LWTs after TableMetadata was made immutable (CASSANDRA-14356)
 + * Abort compactions quicker (CASSANDRA-14397)
 + * Support light-weight transactions in cassandra-stress (CASSANDRA-13529)
 + * Make AsyncOneResponse use the correct timeout (CASSANDRA-14509)
 + * Add option to sanity check tombstones on reads/compactions (CASSANDRA-14467)
 + * Add a virtual table to expose all running sstable tasks (CASSANDRA-14457)
 + * Let nodetool import take a list of directories (CASSANDRA-14442)
 + * Avoid unneeded memory allocations / cpu for disabled log levels (CASSANDRA-14488)
 + * Implement virtual keyspace interface (CASSANDRA-7622)
 + * nodetool import cleanup and improvements (CASSANDRA-14417)
 + * Bump jackson version to >= 2.9.5 (CASSANDRA-14427)
 + * Allow nodetool toppartitions without specifying table (CASSANDRA-14360)
 + * Audit logging for database activity (CASSANDRA-12151)
 + * Clean up build artifacts in docs container (CASSANDRA-14432)
 + * Minor network authz improvements (CASSANDRA-14413)
 + * Automatic sstable upgrades (CASSANDRA-14197)
 + * Replace deprecated junit.framework.Assert usages with org.junit.Assert (CASSANDRA-14431)
 + * Cassandra-stress throws NPE if insert section isn't specified in user profile (CASSANDRA-14426)
 + * List clients by protocol versions `nodetool clientstats --by-protocol` (CASSANDRA-14335)
 + * Improve LatencyMetrics performance by reducing write path processing (CASSANDRA-14281)
 + * Add network authz (CASSANDRA-13985)
 + * Use the correct IP/Port for Streaming when localAddress is left unbound (CASSANDRA-14389)
 + * nodetool listsnapshots is missing local system keyspace snapshots (CASSANDRA-14381)
 + * Remove StreamCoordinator.streamExecutor thread pool (CASSANDRA-14402)
 + * Rename nodetool --with-port to --print-port to disambiguate from --port (CASSANDRA-14392)
 + * Client TOPOLOGY_CHANGE messages have wrong port. (CASSANDRA-14398)
 + * Add ability to load new SSTables from a separate directory (CASSANDRA-6719)
 + * Eliminate background repair and probabilistic read_repair_chance table options
 +   (CASSANDRA-13910)
 + * Bind to correct local address in 4.0 streaming (CASSANDRA-14362)
 + * Use standard Amazon naming for datacenter and rack in Ec2Snitch (CASSANDRA-7839)
 + * Fix junit failure for SSTableReaderTest (CASSANDRA-14387)
 + * Abstract write path for pluggable storage (CASSANDRA-14118)
 + * nodetool describecluster should be more informative (CASSANDRA-13853)
 + * Compaction performance improvements (CASSANDRA-14261) 
 + * Refactor Pair usage to avoid boxing ints/longs (CASSANDRA-14260)
 + * Add options to nodetool tablestats to sort and limit output (CASSANDRA-13889)
 + * Rename internals to reflect CQL vocabulary (CASSANDRA-14354)
 + * Add support for hybrid MIN(), MAX() speculative retry policies
 +   (CASSANDRA-14293, CASSANDRA-14338, CASSANDRA-14352)
 + * Fix some regressions caused by 14058 (CASSANDRA-14353)
 + * Abstract repair for pluggable storage (CASSANDRA-14116)
 + * Add meaningful toString() impls (CASSANDRA-13653)
 + * Add sstableloader option to accept target keyspace name (CASSANDRA-13884)
 + * Move processing of EchoMessage response to gossip stage (CASSANDRA-13713)
 + * Add coordinator write metric per CF (CASSANDRA-14232)
 + * Correct and clarify SSLFactory.getSslContext method and call sites (CASSANDRA-14314)
 + * Handle static and partition deletion properly on ThrottledUnfilteredIterator (CASSANDRA-14315)
 + * NodeTool clientstats should show SSL Cipher (CASSANDRA-14322)
 + * Add ability to specify driver name and version (CASSANDRA-14275)
 + * Abstract streaming for pluggable storage (CASSANDRA-14115)
 + * Forced incremental repairs should promote sstables if they can (CASSANDRA-14294)
 + * Use Murmur3 for validation compactions (CASSANDRA-14002)
 + * Comma at the end of the seed list is interpreted as localhost (CASSANDRA-14285)
 + * Refactor read executor and response resolver, abstract read repair (CASSANDRA-14058)
 + * Add optional startup delay to wait until peers are ready (CASSANDRA-13993)
 + * Add a few options to nodetool verify (CASSANDRA-14201)
 + * CVE-2017-5929 Security vulnerability and redefine default log rotation policy (CASSANDRA-14183)
 + * Use JVM default SSL validation algorithm instead of custom default (CASSANDRA-13259)
 + * Better document in code InetAddressAndPort usage post 7544, incorporate port into UUIDGen node (CASSANDRA-14226)
 + * Fix sstablemetadata date string for minLocalDeletionTime (CASSANDRA-14132)
 + * Make it possible to change neverPurgeTombstones during runtime (CASSANDRA-14214)
 + * Remove GossipDigestSynVerbHandler#doSort() (CASSANDRA-14174)
 + * Add nodetool clientlist (CASSANDRA-13665)
 + * Revert ProtocolVersion changes from CASSANDRA-7544 (CASSANDRA-14211)
 + * Non-disruptive seed node list reload (CASSANDRA-14190)
 + * Nodetool tablehistograms to print statics for all the tables (CASSANDRA-14185)
 + * Migrate dtests to use pytest and python3 (CASSANDRA-14134)
 + * Allow storage port to be configurable per node (CASSANDRA-7544)
 + * Make sub-range selection for non-frozen collections return null instead of empty (CASSANDRA-14182)
 + * BloomFilter serialization format should not change byte ordering (CASSANDRA-9067)
 + * Remove unused on-heap BloomFilter implementation (CASSANDRA-14152)
 + * Delete temp test files on exit (CASSANDRA-14153)
 + * Make PartitionUpdate and Mutation immutable (CASSANDRA-13867)
 + * Fix CommitLogReplayer exception for CDC data (CASSANDRA-14066)
 + * Fix cassandra-stress startup failure (CASSANDRA-14106)
 + * Remove initialDirectories from CFS (CASSANDRA-13928)
 + * Fix trivial log format error (CASSANDRA-14015)
 + * Allow sstabledump to do a json object per partition (CASSANDRA-13848)
 + * Add option to optimise merkle tree comparison across replicas (CASSANDRA-3200)
 + * Remove unused and deprecated methods from AbstractCompactionStrategy (CASSANDRA-14081)
 + * Fix Distribution.average in cassandra-stress (CASSANDRA-14090)
 + * Support a means of logging all queries as they were invoked (CASSANDRA-13983)
 + * Presize collections (CASSANDRA-13760)
 + * Add GroupCommitLogService (CASSANDRA-13530)
 + * Parallelize initial materialized view build (CASSANDRA-12245)
 + * Fix flaky SecondaryIndexManagerTest.assert[Not]MarkedAsBuilt (CASSANDRA-13965)
 + * Make LWTs send resultset metadata on every request (CASSANDRA-13992)
 + * Fix flaky indexWithFailedInitializationIsNotQueryableAfterPartialRebuild (CASSANDRA-13963)
 + * Introduce leaf-only iterator (CASSANDRA-9988)
 + * Upgrade Guava to 23.3 and Airline to 0.8 (CASSANDRA-13997)
 + * Allow only one concurrent call to StatusLogger (CASSANDRA-12182)
 + * Refactoring to specialised functional interfaces (CASSANDRA-13982)
 + * Speculative retry should allow more friendly params (CASSANDRA-13876)
 + * Throw exception if we send/receive repair messages to incompatible nodes (CASSANDRA-13944)
 + * Replace usages of MessageDigest with Guava's Hasher (CASSANDRA-13291)
 + * Add nodetool cmd to print hinted handoff window (CASSANDRA-13728)
 + * Fix some alerts raised by static analysis (CASSANDRA-13799)
 + * Checksum sstable metadata (CASSANDRA-13321, CASSANDRA-13593)
 + * Add result set metadata to prepared statement MD5 hash calculation (CASSANDRA-10786)
 + * Refactor GcCompactionTest to avoid boxing (CASSANDRA-13941)
 + * Expose recent histograms in JmxHistograms (CASSANDRA-13642)
 + * Fix buffer length comparison when decompressing in netty-based streaming (CASSANDRA-13899)
 + * Properly close StreamCompressionInputStream to release any ByteBuf (CASSANDRA-13906)
 + * Add SERIAL and LOCAL_SERIAL support for cassandra-stress (CASSANDRA-13925)
 + * LCS needlessly checks for L0 STCS candidates multiple times (CASSANDRA-12961)
 + * Correctly close netty channels when a stream session ends (CASSANDRA-13905)
 + * Update lz4 to 1.4.0 (CASSANDRA-13741)
 + * Optimize Paxos prepare and propose stage for local requests (CASSANDRA-13862)
 + * Throttle base partitions during MV repair streaming to prevent OOM (CASSANDRA-13299)
 + * Use compaction threshold for STCS in L0 (CASSANDRA-13861)
 + * Fix problem with min_compress_ratio: 1 and disallow ratio < 1 (CASSANDRA-13703)
 + * Add extra information to SASI timeout exception (CASSANDRA-13677)
 + * Add incremental repair support for --hosts, --force, and subrange repair (CASSANDRA-13818)
 + * Rework CompactionStrategyManager.getScanners synchronization (CASSANDRA-13786)
 + * Add additional unit tests for batch behavior, TTLs, Timestamps (CASSANDRA-13846)
 + * Add keyspace and table name in schema validation exception (CASSANDRA-13845)
 + * Emit metrics whenever we hit tombstone failures and warn thresholds (CASSANDRA-13771)
 + * Make netty EventLoopGroups daemon threads (CASSANDRA-13837)
 + * Race condition when closing stream sessions (CASSANDRA-13852)
 + * NettyFactoryTest is failing in trunk on macOS (CASSANDRA-13831)
 + * Allow changing log levels via nodetool for related classes (CASSANDRA-12696)
 + * Add stress profile yaml with LWT (CASSANDRA-7960)
 + * Reduce memory copies and object creations when acting on ByteBufs (CASSANDRA-13789)
 + * Simplify mx4j configuration (CASSANDRA-13578)
 + * Fix trigger example on 4.0 (CASSANDRA-13796)
 + * Force minimum timeout value (CASSANDRA-9375)
 + * Use netty for streaming (CASSANDRA-12229)
 + * Use netty for internode messaging (CASSANDRA-8457)
 + * Add bytes repaired/unrepaired to nodetool tablestats (CASSANDRA-13774)
 + * Don't delete incremental repair sessions if they still have sstables (CASSANDRA-13758)
 + * Fix pending repair manager index out of bounds check (CASSANDRA-13769)
 + * Don't use RangeFetchMapCalculator when RF=1 (CASSANDRA-13576)
 + * Don't optimise trivial ranges in RangeFetchMapCalculator (CASSANDRA-13664)
 + * Use an ExecutorService for repair commands instead of new Thread(..).start() (CASSANDRA-13594)
 + * Fix race / ref leak in anticompaction (CASSANDRA-13688)
 + * Expose tasks queue length via JMX (CASSANDRA-12758)
 + * Fix race / ref leak in PendingRepairManager (CASSANDRA-13751)
 + * Enable ppc64le runtime as unsupported architecture (CASSANDRA-13615)
 + * Improve sstablemetadata output (CASSANDRA-11483)
 + * Support for migrating legacy users to roles has been dropped (CASSANDRA-13371)
 + * Introduce error metrics for repair (CASSANDRA-13387)
 + * Refactoring to primitive functional interfaces in AuthCache (CASSANDRA-13732)
 + * Update metrics to 3.1.5 (CASSANDRA-13648)
 + * batch_size_warn_threshold_in_kb can now be set at runtime (CASSANDRA-13699)
 + * Avoid always rebuilding secondary indexes at startup (CASSANDRA-13725)
 + * Upgrade JMH from 1.13 to 1.19 (CASSANDRA-13727)
 + * Upgrade SLF4J from 1.7.7 to 1.7.25 (CASSANDRA-12996)
 + * Default for start_native_transport now true if not set in config (CASSANDRA-13656)
 + * Don't add localhost to the graph when calculating where to stream from (CASSANDRA-13583)
 + * Make CDC availability more deterministic via hard-linking (CASSANDRA-12148)
 + * Allow skipping equality-restricted clustering columns in ORDER BY clause (CASSANDRA-10271)
 + * Use common nowInSec for validation compactions (CASSANDRA-13671)
 + * Improve handling of IR prepare failures (CASSANDRA-13672)
 + * Send IR coordinator messages synchronously (CASSANDRA-13673)
 + * Flush system.repair table before IR finalize promise (CASSANDRA-13660)
 + * Fix column filter creation for wildcard queries (CASSANDRA-13650)
 + * Add 'nodetool getbatchlogreplaythrottle' and 'nodetool setbatchlogreplaythrottle' (CASSANDRA-13614)
 + * fix race condition in PendingRepairManager (CASSANDRA-13659)
 + * Allow noop incremental repair state transitions (CASSANDRA-13658)
 + * Run repair with down replicas (CASSANDRA-10446)
 + * Added started & completed repair metrics (CASSANDRA-13598)
 + * Improve secondary index (re)build failure and concurrency handling (CASSANDRA-10130)
 + * Improve calculation of available disk space for compaction (CASSANDRA-13068)
 + * Change the accessibility of RowCacheSerializer for third party row cache plugins (CASSANDRA-13579)
 + * Allow sub-range repairs for a preview of repaired data (CASSANDRA-13570)
 + * NPE in IR cleanup when columnfamily has no sstables (CASSANDRA-13585)
 + * Fix Randomness of stress values (CASSANDRA-12744)
 + * Allow selecting Map values and Set elements (CASSANDRA-7396)
 + * Fast and garbage-free Streaming Histogram (CASSANDRA-13444)
 + * Update repairTime for keyspaces on completion (CASSANDRA-13539)
 + * Add configurable upper bound for validation executor threads (CASSANDRA-13521)
 + * Bring back maxHintTTL propery (CASSANDRA-12982)
 + * Add testing guidelines (CASSANDRA-13497)
 + * Add more repair metrics (CASSANDRA-13531)
 + * RangeStreamer should be smarter when picking endpoints for streaming (CASSANDRA-4650)
 + * Avoid rewrapping an exception thrown for cache load functions (CASSANDRA-13367)
 + * Log time elapsed for each incremental repair phase (CASSANDRA-13498)
 + * Add multiple table operation support to cassandra-stress (CASSANDRA-8780)
 + * Fix incorrect cqlsh results when selecting same columns multiple times (CASSANDRA-13262)
 + * Fix WriteResponseHandlerTest is sensitive to test execution order (CASSANDRA-13421)
 + * Improve incremental repair logging (CASSANDRA-13468)
 + * Start compaction when incremental repair finishes (CASSANDRA-13454)
 + * Add repair streaming preview (CASSANDRA-13257)
 + * Cleanup isIncremental/repairedAt usage (CASSANDRA-13430)
 + * Change protocol to allow sending key space independent of query string (CASSANDRA-10145)
 + * Make gc_log and gc_warn settable at runtime (CASSANDRA-12661)
 + * Take number of files in L0 in account when estimating remaining compaction tasks (CASSANDRA-13354)
 + * Skip building views during base table streams on range movements (CASSANDRA-13065)
 + * Improve error messages for +/- operations on maps and tuples (CASSANDRA-13197)
 + * Remove deprecated repair JMX APIs (CASSANDRA-11530)
 + * Fix version check to enable streaming keep-alive (CASSANDRA-12929)
 + * Make it possible to monitor an ideal consistency level separate from actual consistency level (CASSANDRA-13289)
 + * Outbound TCP connections ignore internode authenticator (CASSANDRA-13324)
 + * Upgrade junit from 4.6 to 4.12 (CASSANDRA-13360)
 + * Cleanup ParentRepairSession after repairs (CASSANDRA-13359)
 + * Upgrade snappy-java to 1.1.2.6 (CASSANDRA-13336)
 + * Incremental repair not streaming correct sstables (CASSANDRA-13328)
 + * Upgrade the jna version to 4.3.0 (CASSANDRA-13300)
 + * Add the currentTimestamp, currentDate, currentTime and currentTimeUUID functions (CASSANDRA-13132)
 + * Remove config option index_interval (CASSANDRA-10671)
 + * Reduce lock contention for collection types and serializers (CASSANDRA-13271)
 + * Make it possible to override MessagingService.Verb ids (CASSANDRA-13283)
 + * Avoid synchronized on prepareForRepair in ActiveRepairService (CASSANDRA-9292)
 + * Adds the ability to use uncompressed chunks in compressed files (CASSANDRA-10520)
 + * Don't flush sstables when streaming for incremental repair (CASSANDRA-13226)
 + * Remove unused method (CASSANDRA-13227)
 + * Fix minor bugs related to #9143 (CASSANDRA-13217)
 + * Output warning if user increases RF (CASSANDRA-13079)
 + * Remove pre-3.0 streaming compatibility code for 4.0 (CASSANDRA-13081)
 + * Add support for + and - operations on dates (CASSANDRA-11936)
 + * Fix consistency of incrementally repaired data (CASSANDRA-9143)
 + * Increase commitlog version (CASSANDRA-13161)
 + * Make TableMetadata immutable, optimize Schema (CASSANDRA-9425)
 + * Refactor ColumnCondition (CASSANDRA-12981)
 + * Parallelize streaming of different keyspaces (CASSANDRA-4663)
 + * Improved compactions metrics (CASSANDRA-13015)
 + * Speed-up start-up sequence by avoiding un-needed flushes (CASSANDRA-13031)
 + * Use Caffeine (W-TinyLFU) for on-heap caches (CASSANDRA-10855)
 + * Thrift removal (CASSANDRA-11115)
 + * Remove pre-3.0 compatibility code for 4.0 (CASSANDRA-12716)
 + * Add column definition kind to dropped columns in schema (CASSANDRA-12705)
 + * Add (automate) Nodetool Documentation (CASSANDRA-12672)
 + * Update bundled cqlsh python driver to 3.7.0 (CASSANDRA-12736)
 + * Reject invalid replication settings when creating or altering a keyspace (CASSANDRA-12681)
 + * Clean up the SSTableReader#getScanner API wrt removal of RateLimiter (CASSANDRA-12422)
 + * Use new token allocation for non bootstrap case as well (CASSANDRA-13080)
 + * Avoid byte-array copy when key cache is disabled (CASSANDRA-13084)
 + * Require forceful decommission if number of nodes is less than replication factor (CASSANDRA-12510)
 + * Allow IN restrictions on column families with collections (CASSANDRA-12654)
 + * Log message size in trace message in OutboundTcpConnection (CASSANDRA-13028)
 + * Add timeUnit Days for cassandra-stress (CASSANDRA-13029)
 + * Add mutation size and batch metrics (CASSANDRA-12649)
 + * Add method to get size of endpoints to TokenMetadata (CASSANDRA-12999)
 + * Expose time spent waiting in thread pool queue (CASSANDRA-8398)
 + * Conditionally update index built status to avoid unnecessary flushes (CASSANDRA-12969)
 + * cqlsh auto completion: refactor definition of compaction strategy options (CASSANDRA-12946)
 + * Add support for arithmetic operators (CASSANDRA-11935)
 + * Add histogram for delay to deliver hints (CASSANDRA-13234)
 + * Fix cqlsh automatic protocol downgrade regression (CASSANDRA-13307)
 + * Changing `max_hint_window_in_ms` at runtime (CASSANDRA-11720)
 + * Trivial format error in StorageProxy (CASSANDRA-13551)
 + * Nodetool repair can hang forever if we lose the notification for the repair completing/failing (CASSANDRA-13480)
 + * Anticompaction can cause noisy log messages (CASSANDRA-13684)
 + * Switch to client init for sstabledump (CASSANDRA-13683)
 + * CQLSH: Don't pause when capturing data (CASSANDRA-13743)
 + * nodetool clearsnapshot requires --all to clear all snapshots (CASSANDRA-13391)
 + * Correctly count range tombstones in traces and tombstone thresholds (CASSANDRA-8527)
 + * cqlshrc.sample uses incorrect option for time formatting (CASSANDRA-14243)
 +
 +
  3.11.5
+  * Add flag to disable SASI indexes, and warnings on creation (CASSANDRA-14866)
  Merged from 3.0:
   * Improve `nodetool status -r` speed (CASSANDRA-14847)
   * Improve merkle tree size and time on heap (CASSANDRA-14096)
@@@ -390,9 -44,11 +391,9 @@@ Merged from 3.0
   * Fix static column order for SELECT * wildcard queries (CASSANDRA-14638)
   * sstableloader should use discovered broadcast address to connect intra-cluster (CASSANDRA-14522)
   * Fix reading columns with non-UTF names from schema (CASSANDRA-14468)
-  Merged from 2.2:
+ Merged from 2.2:
   * CircleCI docker image should bake in more dependencies (CASSANDRA-14985)
 - * Don't enable client transports when bootstrap is pending (CASSANDRA-14525)
   * MigrationManager attempts to pull schema from different major version nodes (CASSANDRA-14928)
 - * Fix incorrect cqlsh results when selecting same columns multiple times (CASSANDRA-13262)
   * Returns null instead of NaN or Infinity in JSON strings (CASSANDRA-14377)
  Merged from 2.1:
   * Paged Range Slice queries with DISTINCT can drop rows from results (CASSANDRA-14956)
diff --cc NEWS.txt
index d9950e3,2feac81..ead28f0
--- a/NEWS.txt
+++ b/NEWS.txt
@@@ -113,116 -47,17 +113,131 @@@ New feature
  
  Upgrading
  ---------
 -	- repair_session_max_tree_depth setting has been added to cassandra.yaml to allow operators to reduce
 -	  merkle tree size if repair is creating too much heap pressure. See CASSANDRA-14096 for details.
 +    - CASSANDRA-13241 lowered the default chunk_length_in_kb for compressed tables from
 +      64kb to 16kb. For highly compressible data this can have a noticeable impact
 +      on space utilization. You may want to consider manually specifying this value.
 +    - Additional columns have been added to system_distributed.repair_history,
 +      system_traces.sessions and system_traces.events. As a result select queries
 +      against these tables will fail and generate an error in the log
 +      during upgrade when the cluster is mixed version. The tables can be made
 +      readable by following the instructions in CASSANDRA-14897 to add the
 +      new columns to the system tables before upgrading.
 +    - Timestamp ties between values resolve differently: if either value has a TTL,
 +      this value always wins. This is to provide consistent reconciliation before
 +      and after the value expires into a tombstone.
 +    - Cassandra 4.0 removed support for COMPACT STORAGE tables. All Compact Tables
 +      have to be migrated using the `ALTER ... DROP COMPACT STORAGE` statement in 3.0/3.11.
 +      Starting with 4.0, Cassandra will not start if flags indicate that a table is non-CQL.
 +      Syntax for creating compact tables is also deprecated.
 +    - Support for legacy auth tables in the system_auth keyspace (users,
 +      permissions, credentials) and the migration code has been removed. Migration
 +      of these legacy auth tables must have been completed before the upgrade to
 +      4.0 and the legacy tables must have been removed. See the 'Upgrading' section
 +      for version 2.2 for migration instructions.
 +    - Cassandra 4.0 removed support for the deprecated Thrift interface. Amongst
 +      other things, this implies the removal of all yaml options related to thrift
 +      ('start_rpc', rpc_port, ...).
 +    - Cassandra 4.0 removed support for any pre-3.0 format. This means you
 +      cannot upgrade from a 2.x version to 4.0 directly; you have to upgrade to
 +      a 3.0.x/3.x version first (and run upgradesstables). In particular, this
 +      means Cassandra 4.0 cannot load or read pre-3.0 sstables in any way: you
 +      will need to upgrade those sstables in 3.0.x/3.x first.
 +    - Upgrades from 3.0.x or 3.x are supported since 3.0.13 or 3.11.0; earlier
 +      versions will cause issues during rolling upgrades (CASSANDRA-13274).
 +    - Cassandra will no longer allow invalid keyspace replication options, such
 +      as invalid datacenter names for NetworkTopologyStrategy. Operators MUST
 +      add new nodes to a datacenter before they can set ALTER or CREATE
 +      keyspace replication policies using that datacenter. Existing keyspaces
 +      will continue to operate, but CREATE and ALTER will validate that all
 +      datacenters specified exist in the cluster.
 +    - Cassandra 4.0 fixes a problem with incremental repair which caused repaired
 +      data to be inconsistent between nodes. The fix changes the behavior of both
 +      full and incremental repairs. For full repairs, data is no longer marked
 +      repaired. For incremental repairs, anticompaction is run at the beginning
 +      of the repair, instead of at the end. If incremental repair was being used
 +      prior to upgrading, a full repair should be run after upgrading to resolve
 +      any inconsistencies.
 +    - Config option index_interval has been removed (it was deprecated since 2.0)
 +    - Deprecated repair JMX APIs are removed.
 +    - The version of snappy-java has been upgraded to 1.1.2.6
 +	- the minimum value for internode message timeouts is 10ms. Previously, any
 +	  positive value was allowed. See cassandra.yaml entries like
 +	  read_request_timeout_in_ms for more details.
 +	- Cassandra 4.0 allows a single port to be used for both secure and insecure
 +	  connections between cassandra nodes (CASSANDRA-10404). See the yaml for
 +	  specific property changes, and see the security doc for full details.
 +    - Due to the parallelization of the initial build of materialized views,
 +      the per token range view building status is stored in the new table
 +      `system.view_builds_in_progress`. The old table `system.views_builds_in_progress`
 +      is no longer used and can be removed. See CASSANDRA-12245 for more details.
 +	- Config option commitlog_sync_batch_window_in_ms has been deprecated as its
 +	  documentation was incorrect and the setting itself nearly useless.
 +	  Batch mode remains a valid commit log mode, however.
 +	- There is a new commit log mode, group, which is similar to batch mode
 +	  but blocks for up to a configurable number of milliseconds between disk flushes.
 +	- nodetool clearsnapshot now requires the --all flag to remove all snapshots.
 +	  Previous behavior would delete all snapshots by default.
 +    - Nodes are now identified by a combination of IP and storage port.
 +      Existing JMX APIs, nodetool, and system tables continue to work
 +      and accept/return just an IP, but there is a new
 +      version of each that works with the full unambiguous identifier.
 +      You should prefer these over the deprecated ambiguous versions that only
 +      work with an IP. This was done to support multiple instances per IP.
 +      Additionally we are moving to only using a single port for encrypted and
 +      unencrypted traffic and if you want multiple instances per IP you must
 +      first switch encrypted traffic to the storage port and not a separate
 +      encrypted port. If you want to use multiple instances per IP
 +      with SSL you will need to use StartTLS on storage_port and set
 +      outgoing_encrypted_port_source to gossip so that outbound connections
 +      know what port to connect to for each instance. Before changing
 +      storage port or native port at nodes you must first upgrade the entire cluster
 +      and clients to 4.0 so they can handle the port not being consistent across
 +      the cluster.
 +    - Names of AWS regions/availability zones have been cleaned up to more correctly
 +      match the Amazon names. There is now a new option in conf/cassandra-rackdc.properties
 +      that lets users enable the correct names for new clusters, or use the legacy
 +      names for existing clusters. See conf/cassandra-rackdc.properties for details.
 +    - Background repair has been removed. dclocal_read_repair_chance and
 +      read_repair_chance table options have been removed and are now rejected.
 +      See CASSANDRA-13910 for details.
 +    - Internode TCP connections that do not ack segments for 30s will now
 +      be automatically detected and closed via the Linux TCP_USER_TIMEOUT
 +      socket option. This should be exceedingly rare, but AWS networks (and
 +      other stateful firewalls) apparently suffer from this issue. You can
 +      tune the timeouts on TCP connection and segment ack via the
 +      `cassandra.yaml:internode_tcp_connect_timeout_in_ms` and
 +      `cassandra.yaml:internode_tcp_user_timeout_in_ms` options respectively.
 +      See CASSANDRA-14358 for details.
 +	- repair_session_space_in_mb setting has been added to cassandra.yaml to allow operators to reduce
 +	  merkle tree size if repair is creating too much heap pressure. The repair_session_max_tree_depth
- 	  setting added in 3.0.19 and 3.11.5 is deprecated in favor of this setting. See CASSANDRA-14096 
++	  setting added in 3.0.19 and 3.11.5 is deprecated in favor of this setting. See CASSANDRA-14096
++    - The flags 'enable_materialized_views' and 'enable_sasi_indexes' in cassandra.yaml
++      have been set to false by default. Operators should modify them to allow the
++      creation of new views and SASI indexes; existing ones will continue working.
++      See CASSANDRA-14866 for details.
 +
 +Materialized Views
 +-------------------
 +   - Following a discussion regarding concerns about the design and safety of Materialized Views, the C* development
 +     community no longer recommends them for production use, and considers them experimental. Warning messages will
 +     now be logged when they are created. (See https://www.mail-archive.com/dev@cassandra.apache.org/msg11511.html)
 +   - An 'enable_materialized_views' flag has been added to cassandra.yaml to allow operators to prevent creation of
 +     views.
 +   - CREATE MATERIALIZED VIEW syntax has become stricter. Partition key columns are no longer implicitly considered
 +     to be NOT NULL, and no base primary key columns get automatically included in view definition. You have to
 +     specify them explicitly now.
 +
++3.11.5
++======
+ 
+ Experimental features
+ ---------------------
+     - An 'enable_sasi_indexes' flag, true by default, has been added to cassandra.yaml to allow operators to prevent
+       the creation of new SASI indexes, which are considered experimental and are not recommended for production use.
+       (See https://www.mail-archive.com/dev@cassandra.apache.org/msg13582.html)
+     - The flags 'enable_sasi_indexes' and 'enable_materialized_views' have been grouped under an experimental features
+       section in cassandra.yaml.
+ 
  3.11.4
  ======
  
diff --cc conf/cassandra.yaml
index 78fb162,9182008..ca854ca
--- a/conf/cassandra.yaml
+++ b/conf/cassandra.yaml
@@@ -1220,82 -1245,15 +1212,97 @@@ back_pressure_strategy
  #
  # otc_backlog_expiration_interval_ms: 200
  
 +# Track a metric per keyspace indicating whether replication achieved the ideal consistency
 +# level for writes without timing out. This is different from the consistency level requested by
 +# each write which may be lower in order to facilitate availability.
 +# ideal_consistency_level: EACH_QUORUM
 +
 +# Path to write full query log data to when the full query log is enabled
 +# The full query log will recursively delete the contents of this path at
 +# times. Don't place links in this directory to other parts of the filesystem.
 +#full_query_log_dir: /tmp/cassandrafullquerylog
 +
 +# Automatically upgrade sstables after upgrade - if there is no ordinary compaction to do, the
 +# oldest non-upgraded sstable will get upgraded to the latest version
 +# automatic_sstable_upgrade: false
 +# Limit the number of concurrent sstable upgrades
 +# max_concurrent_automatic_sstable_upgrades: 1
 +
 +# Audit logging - Logs every incoming CQL command request and authentication to a node. See the docs
 +# on audit_logging for full details about the various configuration options.
 +audit_logging_options:
 +    enabled: false
 +    logger: BinAuditLogger
 +    # audit_logs_dir:
 +    # included_keyspaces:
 +    # excluded_keyspaces: system, system_schema, system_virtual_schema
 +    # included_categories:
 +    # excluded_categories:
 +    # included_users:
 +    # excluded_users:
 +    # roll_cycle: HOURLY
 +    # block: true
 +    # max_queue_weight: 268435456 # 256 MiB
 +    # max_log_size: 17179869184 # 16 GiB
 +    ## archive command is "/path/to/script.sh %path" where %path is replaced with the file being rolled:
 +    # archive_command:
 +    # max_archive_retries: 10
 +
 +
 +# default options for full query logging - these can be overridden from command line when executing
 +# nodetool enablefullquerylog
 +#full_query_logging_options:
 +    # log_dir:
 +    # roll_cycle: HOURLY
 +    # block: true
 +    # max_queue_weight: 268435456 # 256 MiB
 +    # max_log_size: 17179869184 # 16 GiB
 +    ## archive command is "/path/to/script.sh %path" where %path is replaced with the file being rolled:
 +    # archive_command:
 +    # max_archive_retries: 10
 +
 +# validate tombstones on reads and compaction
 +# can be either "disabled", "warn" or "exception"
 +# corrupted_tombstone_strategy: disabled
 +
 +# Diagnostic Events #
 +# If enabled, diagnostic events can be helpful for troubleshooting operational issues. Emitted events contain details
 +# on internal state and temporal relationships across events, accessible by clients via JMX.
 +diagnostic_events_enabled: false
 +
 +# Use native transport TCP message coalescing. If, on upgrading to 4.0, you find your throughput decreasing, and in
 +# particular you run an old kernel or have very few client connections, this option might be worth evaluating.
 +#native_transport_flush_in_batches_legacy: false
 +
 +# Enable tracking of repaired state of data during reads and comparison between replicas
 +# Mismatches between the repaired sets of replicas can be characterized as either confirmed
 +# or unconfirmed. In this context, unconfirmed indicates that the presence of pending repair
 +# sessions, unrepaired partition tombstones, or some other condition means that the disparity
 +# cannot be considered conclusive. Confirmed mismatches should be a trigger for investigation
 +# as they may be indicative of corruption or data loss.
 +# There are separate flags for range vs partition reads as single partition reads are only tracked
 +# when CL > 1 and a digest mismatch occurs. Currently, range queries don't use digests so if
 +# enabled for range reads, all range reads will include repaired data tracking. As this adds
 +# some overhead, operators may wish to disable it whilst still enabling it for partition reads
 +repaired_data_tracking_for_range_reads_enabled: false
 +repaired_data_tracking_for_partition_reads_enabled: false
 +# If false, only confirmed mismatches will be reported. If true, a separate metric for unconfirmed
 +# mismatches will also be recorded. This is to avoid potential signal:noise issues, as unconfirmed
 +# mismatches are less actionable than confirmed ones.
 +report_unconfirmed_repaired_data_mismatches: false
  
+ #########################
+ # EXPERIMENTAL FEATURES #
+ #########################
+ 
+ # Enables materialized view creation on this node.
+ # Materialized views are considered experimental and are not recommended for production use.
 -enable_materialized_views: true
++enable_materialized_views: false
+ 
+ # Enables SASI index creation on this node.
+ # SASI indexes are considered experimental and are not recommended for production use.
 -enable_sasi_indexes: true
++enable_sasi_indexes: false
++
++# Enables creation of transiently replicated keyspaces on this node.
++# Transient replication is experimental and is not recommended for production use.
++enable_transient_replication: false
diff --cc src/java/org/apache/cassandra/config/Config.java
index a95db23,1976b95..04ac608
--- a/src/java/org/apache/cassandra/config/Config.java
+++ b/src/java/org/apache/cassandra/config/Config.java
@@@ -354,10 -346,10 +354,12 @@@ public class Confi
      public boolean enable_user_defined_functions = false;
      public boolean enable_scripted_user_defined_functions = false;
  
--    public boolean enable_materialized_views = true;
++    public boolean enable_materialized_views = false;
 +
 +    public boolean enable_transient_replication = false;
  
 -    public boolean enable_sasi_indexes = true;
++    public boolean enable_sasi_indexes = false;
+ 
      /**
       * Optionally disable asynchronous UDF execution.
       * Disabling asynchronous UDF execution also implicitly disables the security-manager!
diff --cc src/java/org/apache/cassandra/config/DatabaseDescriptor.java
index 3f80d71,99f8575..e5fe772
--- a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
+++ b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
@@@ -2552,16 -2412,21 +2552,31 @@@ public class DatabaseDescripto
          return conf.enable_materialized_views;
      }
  
+     public static void setEnableMaterializedViews(boolean enableMaterializedViews)
+     {
+         conf.enable_materialized_views = enableMaterializedViews;
+     }
+ 
+     public static boolean getEnableSASIIndexes()
+     {
+         return conf.enable_sasi_indexes;
+     }
+ 
+     public static void setEnableSASIIndexes(boolean enableSASIIndexes)
+     {
+         conf.enable_sasi_indexes = enableSASIIndexes;
+     }
+ 
 +    public static boolean isTransientReplicationEnabled()
 +    {
 +        return conf.enable_transient_replication;
 +    }
 +
 +    public static void setTransientReplicationEnabledUnsafe(boolean enabled)
 +    {
 +        conf.enable_transient_replication = enabled;
 +    }
 +
      public static long getUserDefinedFunctionFailTimeout()
      {
          return conf.user_defined_function_fail_timeout;
diff --cc src/java/org/apache/cassandra/cql3/statements/schema/CreateIndexStatement.java
index dbca160,0000000..18c6511
mode 100644,000000..100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/CreateIndexStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/CreateIndexStatement.java
@@@ -1,243 -1,0 +1,258 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.cassandra.cql3.statements.schema;
 +
 +import java.util.*;
 +
++import com.google.common.collect.ImmutableSet;
 +import com.google.common.collect.Lists;
 +
 +import org.apache.cassandra.audit.AuditLogContext;
 +import org.apache.cassandra.audit.AuditLogEntryType;
 +import org.apache.cassandra.auth.Permission;
++import org.apache.cassandra.config.DatabaseDescriptor;
 +import org.apache.cassandra.cql3.CQLStatement;
 +import org.apache.cassandra.cql3.ColumnIdentifier;
 +import org.apache.cassandra.cql3.QualifiedName;
 +import org.apache.cassandra.cql3.statements.schema.IndexTarget.Type;
 +import org.apache.cassandra.db.Keyspace;
 +import org.apache.cassandra.db.marshal.MapType;
 +import org.apache.cassandra.exceptions.InvalidRequestException;
++import org.apache.cassandra.index.sasi.SASIIndex;
 +import org.apache.cassandra.schema.*;
 +import org.apache.cassandra.schema.Keyspaces.KeyspacesDiff;
 +import org.apache.cassandra.service.ClientState;
 +import org.apache.cassandra.transport.Event.SchemaChange;
 +import org.apache.cassandra.transport.Event.SchemaChange.Change;
 +import org.apache.cassandra.transport.Event.SchemaChange.Target;
 +
 +import static com.google.common.collect.Iterables.transform;
 +import static com.google.common.collect.Iterables.tryFind;
 +
 +public final class CreateIndexStatement extends AlterSchemaStatement
 +{
 +    private final String indexName;
 +    private final String tableName;
 +    private final List<IndexTarget.Raw> rawIndexTargets;
 +    private final IndexAttributes attrs;
 +    private final boolean ifNotExists;
 +
 +    public CreateIndexStatement(String keyspaceName,
 +                                String tableName,
 +                                String indexName,
 +                                List<IndexTarget.Raw> rawIndexTargets,
 +                                IndexAttributes attrs,
 +                                boolean ifNotExists)
 +    {
 +        super(keyspaceName);
 +        this.tableName = tableName;
 +        this.indexName = indexName;
 +        this.rawIndexTargets = rawIndexTargets;
 +        this.attrs = attrs;
 +        this.ifNotExists = ifNotExists;
 +    }
 +
 +    public Keyspaces apply(Keyspaces schema)
 +    {
 +        attrs.validate();
 +
++        if (attrs.isCustom && attrs.customClass.equals(SASIIndex.class.getName()) && !DatabaseDescriptor.getEnableSASIIndexes())
++            throw new InvalidRequestException("SASI indexes are disabled. Enable in cassandra.yaml to use.");
++
 +        KeyspaceMetadata keyspace = schema.getNullable(keyspaceName);
 +        if (null == keyspace)
 +            throw ire("Keyspace '%s' doesn't exist", keyspaceName);
 +
 +        TableMetadata table = keyspace.getTableOrViewNullable(tableName);
 +        if (null == table)
 +            throw ire("Table '%s' doesn't exist", tableName);
 +
 +        if (null != indexName && keyspace.hasIndex(indexName))
 +        {
 +            if (ifNotExists)
 +                return schema;
 +
 +            throw ire("Index '%s' already exists", indexName);
 +        }
 +
 +        if (table.isCounter())
 +            throw ire("Secondary indexes on counter tables aren't supported");
 +
 +        if (table.isView())
 +            throw ire("Secondary indexes on materialized views aren't supported");
 +
 +        if (Keyspace.open(table.keyspace).getReplicationStrategy().hasTransientReplicas())
 +            throw new InvalidRequestException("Secondary indexes are not supported on transiently replicated keyspaces");
 +
 +        List<IndexTarget> indexTargets = Lists.newArrayList(transform(rawIndexTargets, t -> t.prepare(table)));
 +
 +        if (indexTargets.isEmpty() && !attrs.isCustom)
 +            throw ire("Only CUSTOM indexes can be created without specifying a target column");
 +
 +        if (indexTargets.size() > 1)
 +        {
 +            if (!attrs.isCustom)
 +                throw ire("Only CUSTOM indexes support multiple columns");
 +
 +            Set<ColumnIdentifier> columns = new HashSet<>();
 +            for (IndexTarget target : indexTargets)
 +                if (!columns.add(target.column))
 +                    throw ire("Duplicate column '%s' in index target list", target.column);
 +        }
 +
 +        indexTargets.forEach(t -> validateIndexTarget(table, t));
 +
 +        String name = null == indexName ? generateIndexName(keyspace, indexTargets) : indexName;
 +
 +        IndexMetadata.Kind kind = attrs.isCustom ? IndexMetadata.Kind.CUSTOM : IndexMetadata.Kind.COMPOSITES;
 +
 +        Map<String, String> options = attrs.isCustom ? attrs.getOptions() : Collections.emptyMap();
 +
 +        IndexMetadata index = IndexMetadata.fromIndexTargets(indexTargets, name, kind, options);
 +
 +        // check to disallow creation of an index which duplicates an existing one in all but name
 +        IndexMetadata equalIndex = tryFind(table.indexes, i -> i.equalsWithoutName(index)).orNull();
 +        if (null != equalIndex)
 +        {
 +            if (ifNotExists)
 +                return schema;
 +
 +            throw ire("Index %s is a duplicate of existing index %s", index.name, equalIndex.name);
 +        }
 +
 +        TableMetadata newTable = table.withSwapped(table.indexes.with(index));
 +        newTable.validate();
 +
 +        return schema.withAddedOrUpdated(keyspace.withSwapped(keyspace.tables.withSwapped(newTable)));
 +    }
 +
++    @Override
++    Set<String> clientWarnings(KeyspacesDiff diff)
++    {
++        if (attrs.isCustom && attrs.customClass.equals(SASIIndex.class.getName()))
++            return ImmutableSet.of(SASIIndex.USAGE_WARNING);
++
++        return ImmutableSet.of();
++    }
++
 +    private void validateIndexTarget(TableMetadata table, IndexTarget target)
 +    {
 +        ColumnMetadata column = table.getColumn(target.column);
 +
 +        if (null == column)
 +            throw ire("Column '%s' doesn't exist", target.column);
 +
 +        if (column.type.referencesDuration())
 +        {
 +            if (column.type.isCollection())
 +                throw ire("Secondary indexes are not supported on collections containing durations");
 +
 +            if (column.type.isTuple())
 +                throw ire("Secondary indexes are not supported on tuples containing durations");
 +
 +            if (column.type.isUDT())
 +                throw  ire("Secondary indexes are not supported on UDTs containing durations");
 +
 +            throw ire("Secondary indexes are not supported on duration columns");
 +        }
 +
 +        if (column.isPartitionKey() && table.partitionKeyColumns().size() == 1)
 +            throw ire("Cannot create secondary index on the only partition key column %s", column);
 +
 +        if (column.type.isFrozenCollection() && target.type != Type.FULL)
 +            throw ire("Cannot create %s() index on frozen column %s. Frozen collections only support full() indexes", target.type, column);
 +
 +        if (!column.type.isFrozenCollection() && target.type == Type.FULL)
 +            throw ire("full() indexes can only be created on frozen collections");
 +
 +        if (!column.type.isCollection() && target.type != Type.SIMPLE)
 +            throw ire("Cannot create %s() index on %s. Non-collection columns only support simple indexes", target.type, column);
 +
 +        if (!(column.type instanceof MapType && column.type.isMultiCell()) && (target.type == Type.KEYS || target.type == Type.KEYS_AND_VALUES))
 +            throw ire("Cannot create index on %s of column %s with non-map type", target.type, column);
 +
 +        if (column.type.isUDT() && column.type.isMultiCell())
 +            throw ire("Cannot create index on non-frozen UDT column %s", column);
 +    }
 +
 +    private String generateIndexName(KeyspaceMetadata keyspace, List<IndexTarget> targets)
 +    {
 +        String baseName = targets.size() == 1
 +                        ? IndexMetadata.generateDefaultIndexName(tableName, targets.get(0).column)
 +                        : IndexMetadata.generateDefaultIndexName(tableName);
 +        return keyspace.findAvailableIndexName(baseName);
 +    }
 +
 +    SchemaChange schemaChangeEvent(KeyspacesDiff diff)
 +    {
 +        return new SchemaChange(Change.UPDATED, Target.TABLE, keyspaceName, tableName);
 +    }
 +
 +    public void authorize(ClientState client)
 +    {
 +        client.ensureTablePermission(keyspaceName, tableName, Permission.ALTER);
 +    }
 +
 +    @Override
 +    public AuditLogContext getAuditLogContext()
 +    {
 +        return new AuditLogContext(AuditLogEntryType.CREATE_INDEX, keyspaceName, indexName);
 +    }
 +
 +    public String toString()
 +    {
 +        return String.format("%s (%s, %s)", getClass().getSimpleName(), keyspaceName, indexName);
 +    }
 +
 +    public static final class Raw extends CQLStatement.Raw
 +    {
 +        private final QualifiedName tableName;
 +        private final QualifiedName indexName;
 +        private final List<IndexTarget.Raw> rawIndexTargets;
 +        private final IndexAttributes attrs;
 +        private final boolean ifNotExists;
 +
 +        public Raw(QualifiedName tableName,
 +                   QualifiedName indexName,
 +                   List<IndexTarget.Raw> rawIndexTargets,
 +                   IndexAttributes attrs,
 +                   boolean ifNotExists)
 +        {
 +            this.tableName = tableName;
 +            this.indexName = indexName;
 +            this.rawIndexTargets = rawIndexTargets;
 +            this.attrs = attrs;
 +            this.ifNotExists = ifNotExists;
 +        }
 +
 +        public CreateIndexStatement prepare(ClientState state)
 +        {
 +            String keyspaceName = tableName.hasKeyspace()
 +                                ? tableName.getKeyspace()
 +                                : indexName.hasKeyspace() ? indexName.getKeyspace() : state.getKeyspace();
 +
 +            if (tableName.hasKeyspace() && !keyspaceName.equals(tableName.getKeyspace()))
 +                throw ire("Keyspace name '%s' doesn't match table name '%s'", keyspaceName, tableName);
 +
 +            if (indexName.hasKeyspace() && !keyspaceName.equals(indexName.getKeyspace()))
 +                throw ire("Keyspace name '%s' doesn't match index name '%s'", keyspaceName, tableName);
 +
 +            return new CreateIndexStatement(keyspaceName, tableName.getName(), indexName.getName(), rawIndexTargets, attrs, ifNotExists);
 +        }
 +    }
 +}
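
A note on the clientWarnings() hook added above: it returns SASIIndex.USAGE_WARNING to the client whenever the custom index class is SASI. A minimal sketch of how that warning could be observed from a CQLTester-style unit test, mirroring testClientWarningOnCreate in the ViewTest changes further down in this diff; the test name, index name and column layout are illustrative, not taken from the patch:

    // Illustrative sketch only, not code from this commit. It reuses the CQLTester
    // helpers (createTable, execute) and the ClientWarn pattern visible elsewhere
    // in this diff; the index name "sasi_warning_idx" is made up for the example.
    @Test
    public void testClientWarningOnSASIIndexCreate() throws Throwable
    {
        createTable("CREATE TABLE %s (k int PRIMARY KEY, v text)");

        ClientWarn.instance.captureWarnings();
        execute("CREATE CUSTOM INDEX sasi_warning_idx ON %s (v) " +
                "USING 'org.apache.cassandra.index.sasi.SASIIndex'");
        List<String> warnings = ClientWarn.instance.getWarnings();

        Assert.assertNotNull(warnings);
        Assert.assertEquals(1, warnings.size());
        Assert.assertEquals(SASIIndex.USAGE_WARNING, warnings.get(0));
    }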
diff --cc src/java/org/apache/cassandra/cql3/statements/schema/CreateViewStatement.java
index bf6bcff,0000000..7e51eb2
mode 100644,000000..100644
--- a/src/java/org/apache/cassandra/cql3/statements/schema/CreateViewStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/schema/CreateViewStatement.java
@@@ -1,423 -1,0 +1,423 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.cassandra.cql3.statements.schema;
 +
 +import java.util.*;
 +
 +import com.google.common.collect.ImmutableSet;
 +import com.google.common.collect.Lists;
 +
 +import org.apache.cassandra.audit.AuditLogContext;
 +import org.apache.cassandra.audit.AuditLogEntryType;
 +import org.apache.cassandra.auth.Permission;
 +import org.apache.cassandra.config.DatabaseDescriptor;
 +import org.apache.cassandra.cql3.*;
 +import org.apache.cassandra.cql3.restrictions.StatementRestrictions;
 +import org.apache.cassandra.cql3.selection.RawSelector;
 +import org.apache.cassandra.cql3.selection.Selectable;
 +import org.apache.cassandra.cql3.statements.StatementType;
- import org.apache.cassandra.db.Keyspace;
 +import org.apache.cassandra.db.marshal.AbstractType;
 +import org.apache.cassandra.db.marshal.ReversedType;
++import org.apache.cassandra.db.view.View;
 +import org.apache.cassandra.exceptions.AlreadyExistsException;
 +import org.apache.cassandra.exceptions.InvalidRequestException;
 +import org.apache.cassandra.schema.*;
 +import org.apache.cassandra.schema.Keyspaces.KeyspacesDiff;
 +import org.apache.cassandra.service.ClientState;
 +import org.apache.cassandra.transport.Event.SchemaChange;
 +import org.apache.cassandra.transport.Event.SchemaChange.Change;
 +import org.apache.cassandra.transport.Event.SchemaChange.Target;
 +
 +import static java.lang.String.join;
 +
 +import static com.google.common.collect.Iterables.concat;
 +import static com.google.common.collect.Iterables.filter;
 +import static com.google.common.collect.Iterables.transform;
 +
 +public final class CreateViewStatement extends AlterSchemaStatement
 +{
 +    private final String tableName;
 +    private final String viewName;
 +
 +    private final List<RawSelector> rawColumns;
 +    private final List<ColumnIdentifier> partitionKeyColumns;
 +    private final List<ColumnIdentifier> clusteringColumns;
 +
 +    private final WhereClause whereClause;
 +
 +    private final LinkedHashMap<ColumnIdentifier, Boolean> clusteringOrder;
 +    private final TableAttributes attrs;
 +
 +    private final boolean ifNotExists;
 +
 +    public CreateViewStatement(String keyspaceName,
 +                               String tableName,
 +                               String viewName,
 +
 +                               List<RawSelector> rawColumns,
 +                               List<ColumnIdentifier> partitionKeyColumns,
 +                               List<ColumnIdentifier> clusteringColumns,
 +
 +                               WhereClause whereClause,
 +
 +                               LinkedHashMap<ColumnIdentifier, Boolean> clusteringOrder,
 +                               TableAttributes attrs,
 +
 +                               boolean ifNotExists)
 +    {
 +        super(keyspaceName);
 +        this.tableName = tableName;
 +        this.viewName = viewName;
 +
 +        this.rawColumns = rawColumns;
 +        this.partitionKeyColumns = partitionKeyColumns;
 +        this.clusteringColumns = clusteringColumns;
 +
 +        this.whereClause = whereClause;
 +
 +        this.clusteringOrder = clusteringOrder;
 +        this.attrs = attrs;
 +
 +        this.ifNotExists = ifNotExists;
 +    }
 +
 +    public Keyspaces apply(Keyspaces schema)
 +    {
-         if (!DatabaseDescriptor.enableMaterializedViews())
++        if (!DatabaseDescriptor.getEnableMaterializedViews())
 +            throw ire("Materialized views are disabled. Enable in cassandra.yaml to use.");
 +
 +        /*
 +         * Basic dependency validations
 +         */
 +
 +        KeyspaceMetadata keyspace = schema.getNullable(keyspaceName);
 +        if (null == keyspace)
 +            throw ire("Keyspace '%s' doesn't exist", keyspaceName);
 +
 +        if (keyspace.createReplicationStrategy().hasTransientReplicas())
 +            throw new InvalidRequestException("Materialized views are not supported on transiently replicated keyspaces");
 +
 +        TableMetadata table = keyspace.tables.getNullable(tableName);
 +        if (null == table)
 +            throw ire("Base table '%s' doesn't exist", tableName);
 +
 +        if (keyspace.hasTable(viewName))
 +            throw ire("Cannot create materialized view '%s' - a table with the same name already exists", viewName);
 +
 +        if (keyspace.hasView(viewName))
 +        {
 +            if (ifNotExists)
 +                return schema;
 +
 +            throw new AlreadyExistsException(keyspaceName, viewName);
 +        }
 +
 +        /*
 +         * Base table validation
 +         */
 +
 +        if (table.isCounter())
 +            throw ire("Materialized views are not supported on counter tables");
 +
 +        if (table.isView())
 +            throw ire("Materialized views cannot be created against other materialized views");
 +
 +        if (table.params.gcGraceSeconds == 0)
 +        {
 +            throw ire("Cannot create materialized view '%s' for base table " +
 +                      "'%s' with gc_grace_seconds of 0, since this value is " +
 +                      "used to TTL undelivered updates. Setting gc_grace_seconds" +
 +                      " too low might cause undelivered updates to expire " +
 +                      "before being replayed.",
 +                      viewName, tableName);
 +        }
 +
 +        /*
 +         * Process SELECT clause
 +         */
 +
 +        Set<ColumnIdentifier> selectedColumns = new HashSet<>();
 +
 +        if (rawColumns.isEmpty()) // SELECT *
 +            table.columns().forEach(c -> selectedColumns.add(c.name));
 +
 +        rawColumns.forEach(selector ->
 +        {
 +            if (null != selector.alias)
 +                throw ire("Cannot use aliases when defining a materialized view (got %s)", selector);
 +
 +            if (!(selector.selectable instanceof Selectable.RawIdentifier))
 +                throw ire("Can only select columns by name when defining a materialized view (got %s)", selector.selectable);
 +
 +            // will throw IRE if the column doesn't exist in the base table
 +            ColumnMetadata column = (ColumnMetadata) selector.selectable.prepare(table);
 +
 +            selectedColumns.add(column.name);
 +        });
 +
 +        selectedColumns.stream()
 +                       .map(table::getColumn)
 +                       .filter(ColumnMetadata::isStatic)
 +                       .findAny()
 +                       .ifPresent(c -> { throw ire("Cannot include static column '%s' in materialized view '%s'", c, viewName); });
 +
 +        /*
 +         * Process PRIMARY KEY columns and CLUSTERING ORDER BY clause
 +         */
 +
 +        if (partitionKeyColumns.isEmpty())
 +            throw ire("Must provide at least one partition key column for materialized view '%s'", viewName);
 +
 +        HashSet<ColumnIdentifier> primaryKeyColumns = new HashSet<>();
 +
 +        concat(partitionKeyColumns, clusteringColumns).forEach(name ->
 +        {
 +            ColumnMetadata column = table.getColumn(name);
 +            if (null == column || !selectedColumns.contains(name))
 +                throw ire("Unknown column '%s' referenced in PRIMARY KEY for materialized view '%s'", name, viewName);
 +
 +            if (!primaryKeyColumns.add(name))
 +                throw ire("Duplicate column '%s' in PRIMARY KEY clause for materialized view '%s'", name, viewName);
 +
 +            AbstractType<?> type = column.type;
 +
 +            if (type.isMultiCell())
 +            {
 +                if (type.isCollection())
 +                    throw ire("Invalid non-frozen collection type '%s' for PRIMARY KEY column '%s'", type, name);
 +                else
 +                    throw ire("Invalid non-frozen user-defined type '%s' for PRIMARY KEY column '%s'", type, name);
 +            }
 +
 +            if (type.isCounter())
 +                throw ire("counter type is not supported for PRIMARY KEY column '%s'", name);
 +
 +            if (type.referencesDuration())
 +                throw ire("duration type is not supported for PRIMARY KEY column '%s'", name);
 +        });
 +
 +        // If we give a clustering order, we must explicitly do so for all aliases and in the order of the PK
 +        if (!clusteringOrder.isEmpty() && !clusteringColumns.equals(new ArrayList<>(clusteringOrder.keySet())))
 +            throw ire("Clustering key columns must exactly match columns in CLUSTERING ORDER BY directive");
 +
 +        /*
 +         * We need to include all of the primary key columns from the base table in order to make sure that we do not
 +         * overwrite values in the view. We cannot support "collapsing" the base table into a smaller number of rows in
 +         * the view because if we need to generate a tombstone, we have no way of knowing which value is currently being
 +         * used in the view and whether or not to generate a tombstone. In order to not surprise our users, we require
 +         * that they include all of the columns. We provide them with a list of all of the columns left to include.
 +         */
 +        List<ColumnIdentifier> missingPrimaryKeyColumns =
 +            Lists.newArrayList(filter(transform(table.primaryKeyColumns(), c -> c.name), c -> !primaryKeyColumns.contains(c)));
 +
 +        if (!missingPrimaryKeyColumns.isEmpty())
 +        {
 +            throw ire("Cannot create materialized view '%s' without primary key columns %s from base table '%s'",
 +                      viewName, join(", ", transform(missingPrimaryKeyColumns, ColumnIdentifier::toString)), tableName);
 +        }
 +
 +        Set<ColumnIdentifier> regularBaseTableColumnsInViewPrimaryKey = new HashSet<>(primaryKeyColumns);
 +        transform(table.primaryKeyColumns(), c -> c.name).forEach(regularBaseTableColumnsInViewPrimaryKey::remove);
 +        if (regularBaseTableColumnsInViewPrimaryKey.size() > 1)
 +        {
 +            throw ire("Cannot include more than one non-primary key column in materialized view primary key (got %s)",
 +                      join(", ", transform(regularBaseTableColumnsInViewPrimaryKey, ColumnIdentifier::toString)));
 +        }
 +
 +        /*
 +         * Process WHERE clause
 +         */
 +
 +        if (whereClause.containsCustomExpressions())
 +            throw ire("WHERE clause for materialized view '%s' cannot contain custom index expressions", viewName);
 +
 +        StatementRestrictions restrictions =
 +            new StatementRestrictions(StatementType.SELECT,
 +                                      table,
 +                                      whereClause,
 +                                      VariableSpecifications.empty(),
 +                                      false,
 +                                      false,
 +                                      true,
 +                                      true);
 +
 +        List<ColumnIdentifier> nonRestrictedPrimaryKeyColumns =
 +            Lists.newArrayList(filter(primaryKeyColumns, name -> !restrictions.isRestricted(table.getColumn(name))));
 +
 +        if (!nonRestrictedPrimaryKeyColumns.isEmpty())
 +        {
 +            throw ire("Primary key columns %s must be restricted with 'IS NOT NULL' or otherwise",
 +                      join(", ", transform(nonRestrictedPrimaryKeyColumns, ColumnIdentifier::toString)));
 +        }
 +
 +        // See CASSANDRA-13798
 +        Set<ColumnMetadata> restrictedNonPrimaryKeyColumns = restrictions.nonPKRestrictedColumns(false);
 +        if (!restrictedNonPrimaryKeyColumns.isEmpty() && !Boolean.getBoolean("cassandra.mv.allow_filtering_nonkey_columns_unsafe"))
 +        {
 +            throw ire("Non-primary key columns can only be restricted with 'IS NOT NULL' (got: %s restricted illegally)",
 +                      join(",", transform(restrictedNonPrimaryKeyColumns, ColumnMetadata::toString)));
 +        }
 +
 +        /*
 +         * Validate WITH params
 +         */
 +
 +        attrs.validate();
 +
 +        if (attrs.hasOption(TableParams.Option.DEFAULT_TIME_TO_LIVE))
 +        {
 +            throw ire("Cannot set default_time_to_live for a materialized view. " +
 +                      "Data in a materialized view always expire at the same time than " +
 +                      "the corresponding data in the parent table.");
 +        }
 +
 +        /*
 +         * Build the thing
 +         */
 +
 +        TableMetadata.Builder builder = TableMetadata.builder(keyspaceName, viewName);
 +
 +        if (attrs.hasProperty(TableAttributes.ID))
 +            builder.id(attrs.getId());
 +
 +        builder.params(attrs.asNewTableParams())
 +               .kind(TableMetadata.Kind.VIEW);
 +
 +        partitionKeyColumns.forEach(name -> builder.addPartitionKeyColumn(name, getType(table, name)));
 +        clusteringColumns.forEach(name -> builder.addClusteringColumn(name, getType(table, name)));
 +
 +        selectedColumns.stream()
 +                       .filter(name -> !primaryKeyColumns.contains(name))
 +                       .forEach(name -> builder.addRegularColumn(name, getType(table, name)));
 +
 +        ViewMetadata view = new ViewMetadata(table.id, table.name, rawColumns.isEmpty(), whereClause, builder.build());
 +        view.metadata.validate();
 +
 +        return schema.withAddedOrUpdated(keyspace.withSwapped(keyspace.views.with(view)));
 +    }
 +
 +    SchemaChange schemaChangeEvent(KeyspacesDiff diff)
 +    {
 +        return new SchemaChange(Change.CREATED, Target.TABLE, keyspaceName, viewName);
 +    }
 +
 +    public void authorize(ClientState client)
 +    {
 +        client.ensureTablePermission(keyspaceName, tableName, Permission.ALTER);
 +    }
 +
 +    private AbstractType<?> getType(TableMetadata table, ColumnIdentifier name)
 +    {
 +        AbstractType<?> type = table.getColumn(name).type;
 +        boolean reverse = !clusteringOrder.getOrDefault(name, true);
 +
 +        if (type.isReversed() && !reverse)
 +            return ((ReversedType) type).baseType;
 +        else if (!type.isReversed() && reverse)
 +            return ReversedType.getInstance(type);
 +        else
 +            return type;
 +    }
 +
 +    @Override
 +    Set<String> clientWarnings(KeyspacesDiff diff)
 +    {
-         return ImmutableSet.of("Materialized views are experimental and are not recommended for production use.");
++        return ImmutableSet.of(View.USAGE_WARNING);
 +    }
 +
 +    @Override
 +    public AuditLogContext getAuditLogContext()
 +    {
 +        return new AuditLogContext(AuditLogEntryType.CREATE_VIEW, keyspaceName, viewName);
 +    }
 +
 +    public String toString()
 +    {
 +        return String.format("%s (%s, %s)", getClass().getSimpleName(), keyspaceName, viewName);
 +    }
 +
 +    public static final class Raw extends CQLStatement.Raw
 +    {
 +        private final QualifiedName tableName;
 +        private final QualifiedName viewName;
 +        private final boolean ifNotExists;
 +
 +        private final List<RawSelector> rawColumns;
 +        private final List<ColumnIdentifier> clusteringColumns = new ArrayList<>();
 +        private List<ColumnIdentifier> partitionKeyColumns;
 +
 +        private final WhereClause whereClause;
 +
 +        private final LinkedHashMap<ColumnIdentifier, Boolean> clusteringOrder = new LinkedHashMap<>();
 +        public final TableAttributes attrs = new TableAttributes();
 +
 +        public Raw(QualifiedName tableName, QualifiedName viewName, List<RawSelector> rawColumns, WhereClause whereClause, boolean ifNotExists)
 +        {
 +            this.tableName = tableName;
 +            this.viewName = viewName;
 +            this.rawColumns = rawColumns;
 +            this.whereClause = whereClause;
 +            this.ifNotExists = ifNotExists;
 +        }
 +
 +        public CreateViewStatement prepare(ClientState state)
 +        {
 +            String keyspaceName = viewName.hasKeyspace() ? viewName.getKeyspace() : state.getKeyspace();
 +
 +            if (tableName.hasKeyspace() && !keyspaceName.equals(tableName.getKeyspace()))
 +                throw ire("Cannot create a materialized view on a table in a different keyspace");
 +
 +            if (!bindVariables.isEmpty())
 +                throw ire("Bind variables are not allowed in CREATE MATERIALIZED VIEW statements");
 +
 +            if (null == partitionKeyColumns)
 +                throw ire("No PRIMARY KEY specifed for view '%s' (exactly one required)", viewName);
 +
 +            return new CreateViewStatement(keyspaceName,
 +                                           tableName.getName(),
 +                                           viewName.getName(),
 +
 +                                           rawColumns,
 +                                           partitionKeyColumns,
 +                                           clusteringColumns,
 +
 +                                           whereClause,
 +
 +                                           clusteringOrder,
 +                                           attrs,
 +
 +                                           ifNotExists);
 +        }
 +
 +        public void setPartitionKeyColumns(List<ColumnIdentifier> columns)
 +        {
 +            partitionKeyColumns = columns;
 +        }
 +
 +        public void markClusteringColumn(ColumnIdentifier column)
 +        {
 +            clusteringColumns.add(column);
 +        }
 +
 +        public void extendClusteringOrder(ColumnIdentifier column, boolean ascending)
 +        {
 +            if (null != clusteringOrder.put(column, ascending))
 +                throw ire("Duplicate column '%s' in CLUSTERING ORDER BY clause for view '%s'", column, viewName);
 +        }
 +    }
 +}
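
For context on the primary key validation in apply() above: every primary key column of the base table must appear in the view's primary key, and at most one extra regular column may be added to it. A hedged CQL-level sketch of what that allows and rejects; the table and view names (base_t, ok_view, bad_view) are invented for illustration:

    // Illustrative only; base table primary key is (pk, ck).
    execute("CREATE TABLE base_t (pk int, ck int, v1 int, v2 int, PRIMARY KEY (pk, ck))");

    // Accepted: the view key contains both base key columns plus one regular column.
    execute("CREATE MATERIALIZED VIEW ok_view AS SELECT * FROM base_t " +
            "WHERE pk IS NOT NULL AND ck IS NOT NULL AND v1 IS NOT NULL " +
            "PRIMARY KEY (v1, pk, ck)");

    // Rejected by the "missing primary key columns" check, since ck is absent from
    // the view key; putting two regular columns (v1 and v2) into the key would hit
    // the "more than one non-primary key column" check instead.
    execute("CREATE MATERIALIZED VIEW bad_view AS SELECT * FROM base_t " +
            "WHERE pk IS NOT NULL AND v1 IS NOT NULL " +
            "PRIMARY KEY (v1, pk)");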
diff --cc test/conf/cassandra-murmur.yaml
index a8288d0,a4b25ba..e933837
--- a/test/conf/cassandra-murmur.yaml
+++ b/test/conf/cassandra-murmur.yaml
@@@ -40,3 -43,3 +40,5 @@@ row_cache_class_name: org.apache.cassan
  row_cache_size_in_mb: 16
  enable_user_defined_functions: true
  enable_scripted_user_defined_functions: true
++enable_sasi_indexes: true
++enable_materialized_views: true
diff --cc test/conf/cassandra.yaml
index 3c09637,96ca9a0..d94c478
--- a/test/conf/cassandra.yaml
+++ b/test/conf/cassandra.yaml
@@@ -45,6 -45,3 +45,8 @@@ row_cache_size_in_mb: 1
  enable_user_defined_functions: true
  enable_scripted_user_defined_functions: true
  prepared_statements_cache_size_mb: 1
 +corrupted_tombstone_strategy: exception
 +stream_entire_sstables: true
 +stream_throughput_outbound_megabits_per_sec: 200000000
++enable_sasi_indexes: true
++enable_materialized_views: true
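
Both test configs opt in to the two features explicitly, so unit tests that exercise SASI indexes and materialized views do not depend on the defaults shipped in conf/cassandra.yaml. For reference, a rough, analogy-based sketch of the kind of guard these flags feed in CreateIndexStatement; this is not the patch's actual code, and the accessor name getEnableSASIIndexes() and the message wording are assumptions modelled on the materialized-view check shown earlier:

    // Hypothetical sketch, by analogy with DatabaseDescriptor.getEnableMaterializedViews()
    // in CreateViewStatement above; accessor name and message text are assumptions.
    if (attrs.isCustom && attrs.customClass.equals(SASIIndex.class.getName())
            && !DatabaseDescriptor.getEnableSASIIndexes())
        throw ire("SASI indexes are disabled. Enable in cassandra.yaml to use.");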
diff --cc test/unit/org/apache/cassandra/cql3/ViewTest.java
index 4f99d03,8a98a8e..02fa19e
--- a/test/unit/org/apache/cassandra/cql3/ViewTest.java
+++ b/test/unit/org/apache/cassandra/cql3/ViewTest.java
@@@ -39,12 -37,19 +39,15 @@@ import com.datastax.driver.core.excepti
  import org.apache.cassandra.concurrent.SEPExecutor;
  import org.apache.cassandra.concurrent.Stage;
  import org.apache.cassandra.concurrent.StageManager;
 -import org.apache.cassandra.config.CFMetaData;
 -import org.apache.cassandra.config.ColumnDefinition;
+ import org.apache.cassandra.config.DatabaseDescriptor;
++import org.apache.cassandra.db.view.View;
 +import org.apache.cassandra.schema.TableMetadata;
 +import org.apache.cassandra.schema.ColumnMetadata;
  import org.apache.cassandra.db.ColumnFamilyStore;
  import org.apache.cassandra.db.Keyspace;
  import org.apache.cassandra.db.SystemKeyspace;
  import org.apache.cassandra.db.compaction.CompactionManager;
 -import org.apache.cassandra.db.marshal.AsciiType;
 -import org.apache.cassandra.db.view.View;
 -import org.apache.cassandra.exceptions.SyntaxException;
 -import org.apache.cassandra.schema.KeyspaceParams;
+ import org.apache.cassandra.service.ClientWarn;
  import org.apache.cassandra.transport.ProtocolVersion;
  import org.apache.cassandra.utils.FBUtilities;
  
@@@ -1378,18 -1402,70 +1381,66 @@@ public class ViewTest extends CQLTeste
          //Compact the base table
          FBUtilities.waitOnFutures(futures);
  
 -        while (!SystemKeyspace.isViewBuilt(keyspace(), "mv_test"))
 +        while (!SystemKeyspace.isViewBuilt(keyspace(), viewName1))
              Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
  
 -        assertRows(execute("SELECT count(*) FROM mv_test"), row(1024L));
 +        assertRows(execute("SELECT count(*) FROM " + viewName1), row(1024L));
      }
  
 -
 -    @Test(expected = SyntaxException.class)
 -    public void emptyViewNameTest() throws Throwable
 +    @Test
 +    public void testViewBuilderResume() throws Throwable
      {
 -        execute("CREATE MATERIALIZED VIEW \"\" AS SELECT a, b FROM tbl WHERE b IS NOT NULL PRIMARY KEY (b, a)");
 +        for (int i = 1; i <= 8; i *= 2)
 +        {
 +            testViewBuilderResume(i);
 +        }
      }
+ 
 -     @Test(expected = SyntaxException.class)
 -     public void emptyBaseTableNameTest() throws Throwable
 -     {
 -         execute("CREATE MATERIALIZED VIEW myview AS SELECT a, b FROM \"\" WHERE b IS NOT NULL PRIMARY KEY (b, a)");
 -     }
 -
+     /**
+      * Tests that a client warning is issued on materialized view creation.
+      */
+     @Test
+     public void testClientWarningOnCreate() throws Throwable
+     {
+         createTable("CREATE TABLE %s (k int PRIMARY KEY, v int)");
+ 
+         ClientWarn.instance.captureWarnings();
+         String viewName = keyspace() + ".warning_view";
+         execute("CREATE MATERIALIZED VIEW " + viewName +
 -                " AS SELECT v FROM %s WHERE k IS NOT NULL AND v IS NOT NULL PRIMARY KEY (v, k)");
++                " AS SELECT * FROM %s WHERE k IS NOT NULL AND v IS NOT NULL PRIMARY KEY (v, k)");
+         views.add(viewName);
+         List<String> warnings = ClientWarn.instance.getWarnings();
+ 
+         Assert.assertNotNull(warnings);
+         Assert.assertEquals(1, warnings.size());
+         Assert.assertEquals(View.USAGE_WARNING, warnings.get(0));
+     }
+ 
+     /**
+      * Tests the configuration flag to disable materialized views.
+      */
+     @Test
+     public void testDisableMaterializedViews() throws Throwable
+     {
+         createTable("CREATE TABLE %s (k int PRIMARY KEY, v int)");
+ 
+         executeNet(protocolVersion, "USE " + keyspace());
+ 
+         boolean enableMaterializedViews = DatabaseDescriptor.getEnableMaterializedViews();
+         try
+         {
+             DatabaseDescriptor.setEnableMaterializedViews(false);
+             createView("view1", "CREATE MATERIALIZED VIEW %s AS SELECT v FROM %%s WHERE k IS NOT NULL AND v IS NOT NULL PRIMARY KEY (v, k)");
+             Assert.fail("Should not be able to create a materialized view if they are disabled");
+         }
+         catch (Throwable e)
+         {
+             Assert.assertTrue(e instanceof InvalidQueryException);
+             Assert.assertTrue(e.getMessage().contains("Materialized views are disabled"));
+         }
+         finally
+         {
+             DatabaseDescriptor.setEnableMaterializedViews(enableMaterializedViews);
+         }
+     }
  }
diff --cc test/unit/org/apache/cassandra/index/sasi/SASICQLTest.java
index f46e0b8,17bd196..f78d35e
--- a/test/unit/org/apache/cassandra/index/sasi/SASICQLTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/SASICQLTest.java
@@@ -27,8 -27,11 +27,12 @@@ import org.junit.Test
  import com.datastax.driver.core.Row;
  import com.datastax.driver.core.Session;
  import com.datastax.driver.core.SimpleStatement;
 -import junit.framework.Assert;
 +import org.junit.Assert;
++
+ import org.apache.cassandra.config.DatabaseDescriptor;
  import org.apache.cassandra.cql3.CQLTester;
+ import org.apache.cassandra.exceptions.InvalidRequestException;
+ import org.apache.cassandra.service.ClientWarn;
  
  public class SASICQLTest extends CQLTester
  {
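
The SASICQLTest hunk is truncated here to its import block, but the new imports (DatabaseDescriptor, InvalidRequestException, ClientWarn) hint at the shape of the added tests. A hedged sketch of what a disable-flag test could look like, mirroring testDisableMaterializedViews in ViewTest above; the getEnableSASIIndexes()/setEnableSASIIndexes() accessors and the exact error text are assumptions, not taken from this diff:

    // Hypothetical sketch; accessors and message text are assumed by analogy with
    // the materialized-view flag handling visible in this diff.
    @Test
    public void testDisableSASIIndexes() throws Throwable
    {
        createTable("CREATE TABLE %s (k int PRIMARY KEY, v text)");

        boolean enableSASIIndexes = DatabaseDescriptor.getEnableSASIIndexes();
        try
        {
            DatabaseDescriptor.setEnableSASIIndexes(false);
            execute("CREATE CUSTOM INDEX ON %s (v) USING 'org.apache.cassandra.index.sasi.SASIIndex'");
            Assert.fail("Should not be able to create a SASI index while they are disabled");
        }
        catch (InvalidRequestException e)
        {
            Assert.assertTrue(e.getMessage().contains("SASI indexes are disabled"));
        }
        finally
        {
            DatabaseDescriptor.setEnableSASIIndexes(enableSASIIndexes);
        }
    }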


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org