Posted to commits@cassandra.apache.org by sl...@apache.org on 2015/12/22 11:57:15 UTC

svn commit: r1721342 [2/3] - in /cassandra/site/publish/doc/cql3: CQL-2.1.html CQL-2.2.html CQL-3.0.html

Modified: cassandra/site/publish/doc/cql3/CQL-2.2.html
URL: http://svn.apache.org/viewvc/cassandra/site/publish/doc/cql3/CQL-2.2.html?rev=1721342&r1=1721341&r2=1721342&view=diff
==============================================================================
--- cassandra/site/publish/doc/cql3/CQL-2.2.html (original)
+++ cassandra/site/publish/doc/cql3/CQL-2.2.html Tue Dec 22 10:57:15 2015
@@ -1,4 +1,4 @@
-<?xml version='1.0' encoding='utf-8' ?><!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"><html xmlns="http://www.w3.org/1999/xhtml"><head><meta http-equiv="Content-Type" content="text/html; charset=utf-8"/><title>CQL-2.2</title></head><body><p><link rel="StyleSheet" href="CQL.css" type="text/css" media="screen"></p><h1 id="CassandraQueryLanguageCQLv3.3.0">Cassandra Query Language (CQL) v3.3.0</h1><span id="tableOfContents"><ol style="list-style: none;"><li><a href="CQL-2.2.html#CassandraQueryLanguageCQLv3.3.0">Cassandra Query Language (CQL) v3.3.0</a><ol style="list-style: none;"><li><a href="CQL-2.2.html#CQLSyntax">CQL Syntax</a><ol style="list-style: none;"><li><a href="CQL-2.2.html#Preamble">Preamble</a></li><li><a href="CQL-2.2.html#Conventions">Conventions</a></li><li><a href="CQL-2.2.html#identifiers">Identifiers and keywords</a></li><li><a href="CQL-2.2.html#constants">Constants</a></li><li><a href="CQL-2.
 2.html#Comments">Comments</a></li><li><a href="CQL-2.2.html#statements">Statements</a></li><li><a href="CQL-2.2.html#preparedStatement">Prepared Statement</a></li></ol></li><li><a href="CQL-2.2.html#dataDefinition">Data Definition</a><ol style="list-style: none;"><li><a href="CQL-2.2.html#createKeyspaceStmt">CREATE KEYSPACE</a></li><li><a href="CQL-2.2.html#useStmt">USE</a></li><li><a href="CQL-2.2.html#alterKeyspaceStmt">ALTER KEYSPACE</a></li><li><a href="CQL-2.2.html#dropKeyspaceStmt">DROP KEYSPACE</a></li><li><a href="CQL-2.2.html#createTableStmt">CREATE TABLE</a></li><li><a href="CQL-2.2.html#alterTableStmt">ALTER TABLE</a></li><li><a href="CQL-2.2.html#dropTableStmt">DROP TABLE</a></li><li><a href="CQL-2.2.html#truncateStmt">TRUNCATE</a></li><li><a href="CQL-2.2.html#createIndexStmt">CREATE INDEX</a></li><li><a href="CQL-2.2.html#dropIndexStmt">DROP INDEX</a></li><li><a href="CQL-2.2.html#createTypeStmt">CREATE TYPE</a></li><li><a href="CQL-2.2.html#alterTypeStmt">ALTER TYPE</
 a></li><li><a href="CQL-2.2.html#dropTypeStmt">DROP TYPE</a></li><li><a href="CQL-2.2.html#createTriggerStmt">CREATE TRIGGER</a></li><li><a href="CQL-2.2.html#dropTriggerStmt">DROP TRIGGER</a></li><li><a href="CQL-2.2.html#createFunctionStmt">CREATE FUNCTION</a></li><li><a href="CQL-2.2.html#dropFunctionStmt">DROP FUNCTION</a></li><li><a href="CQL-2.2.html#createAggregateStmt">CREATE AGGREGATE</a></li><li><a href="CQL-2.2.html#dropAggregateStmt">DROP AGGREGATE</a></li></ol></li><li><a href="CQL-2.2.html#dataManipulation">Data Manipulation</a><ol style="list-style: none;"><li><a href="CQL-2.2.html#insertStmt">INSERT</a></li><li><a href="CQL-2.2.html#updateStmt">UPDATE</a></li><li><a href="CQL-2.2.html#deleteStmt">DELETE</a></li><li><a href="CQL-2.2.html#batchStmt">BATCH</a></li></ol></li><li><a href="CQL-2.2.html#queries">Queries</a><ol style="list-style: none;"><li><a href="CQL-2.2.html#selectStmt">SELECT</a></li></ol></li><li><a href="CQL-2.2.html#databaseRoles">Database Roles</a><
 ol style="list-style: none;"><li><a href="CQL-2.2.html#createRoleStmt">CREATE ROLE</a></li><li><a href="CQL-2.2.html#alterRoleStmt">ALTER ROLE</a></li><li><a href="CQL-2.2.html#dropRoleStmt">DROP ROLE</a></li><li><a href="CQL-2.2.html#grantRoleStmt">GRANT ROLE</a></li><li><a href="CQL-2.2.html#revokeRoleStmt">REVOKE ROLE</a></li><li><a href="CQL-2.2.html#createUserStmt">CREATE USER </a></li><li><a href="CQL-2.2.html#alterUserStmt">ALTER USER </a></li><li><a href="CQL-2.2.html#dropUserStmt">DROP USER </a></li><li><a href="CQL-2.2.html#listUsersStmt">LIST USERS</a></li></ol></li><li><a href="CQL-2.2.html#dataControl">Data Control</a><ol style="list-style: none;"><li><a href="CQL-2.2.html#permissions">Permissions </a></li><li><a href="CQL-2.2.html#grantPermissionsStmt">GRANT PERMISSION</a></li><li><a href="CQL-2.2.html#revokePermissionsStmt">REVOKE PERMISSION</a></li></ol></li><li><a href="CQL-2.2.html#types">Data Types</a><ol style="list-style: none;"><li><a href="CQL-2.2.html#usingti
 mestamps">Working with timestamps</a></li><li><a href="CQL-2.2.html#usingdates">Working with dates</a></li><li><a href="CQL-2.2.html#usingtime">Working with time</a></li><li><a href="CQL-2.2.html#counters">Counters</a></li><li><a href="CQL-2.2.html#collections">Working with collections</a></li></ol></li><li><a href="CQL-2.2.html#functions">Functions</a><ol style="list-style: none;"><li><a href="CQL-2.2.html#tokenFun">Token</a></li><li><a href="CQL-2.2.html#uuidFun">Uuid</a></li><li><a href="CQL-2.2.html#timeuuidFun">Timeuuid functions</a></li><li><a href="CQL-2.2.html#timeFun">Time conversion functions</a></li><li><a href="CQL-2.2.html#blobFun">Blob conversion functions</a></li></ol></li><li><a href="CQL-2.2.html#aggregates">Aggregates</a><ol style="list-style: none;"><li><a href="CQL-2.2.html#countFct">Count</a></li><li><a href="CQL-2.2.html#maxMinFcts">Max and Min</a></li><li><a href="CQL-2.2.html#sumFct">Sum</a></li><li><a href="CQL-2.2.html#avgFct">Avg</a></li></ol></li><li><a h
 ref="CQL-2.2.html#udfs">User-Defined Functions</a></li><li><a href="CQL-2.2.html#udas">User-Defined Aggregates</a></li><li><a href="CQL-2.2.html#json">JSON Support</a><ol style="list-style: none;"><li><a href="CQL-2.2.html#selectJson">SELECT JSON</a></li><li><a href="CQL-2.2.html#insertJson">INSERT JSON</a></li><li><a href="CQL-2.2.html#jsonEncoding">JSON Encoding of Cassandra Data Types</a></li><li><a href="CQL-2.2.html#fromJson">The fromJson() Function</a></li><li><a href="CQL-2.2.html#toJson">The toJson() Function</a></li></ol></li><li><a href="CQL-2.2.html#appendixA">Appendix A: CQL Keywords</a></li><li><a href="CQL-2.2.html#appendixB">Appendix B: CQL Reserved Types</a></li><li><a href="CQL-2.2.html#changes">Changes</a><ol style="list-style: none;"><li><a href="CQL-2.2.html#a3.3.0">3.3.0</a></li><li><a href="CQL-2.2.html#a3.2.0">3.2.0</a></li><li><a href="CQL-2.2.html#a3.1.7">3.1.7</a></li><li><a href="CQL-2.2.html#a3.1.6">3.1.6</a></li><li><a href="CQL-2.2.html#a3.1.5">3.1.5</a
 ></li><li><a href="CQL-2.2.html#a3.1.4">3.1.4</a></li><li><a href="CQL-2.2.html#a3.1.3">3.1.3</a></li><li><a href="CQL-2.2.html#a3.1.2">3.1.2</a></li><li><a href="CQL-2.2.html#a3.1.1">3.1.1</a></li><li><a href="CQL-2.2.html#a3.1.0">3.1.0</a></li><li><a href="CQL-2.2.html#a3.0.5">3.0.5</a></li><li><a href="CQL-2.2.html#a3.0.4">3.0.4</a></li><li><a href="CQL-2.2.html#a3.0.3">3.0.3</a></li><li><a href="CQL-2.2.html#a3.0.2">3.0.2</a></li><li><a href="CQL-2.2.html#a3.0.1">3.0.1</a></li></ol></li><li><a href="CQL-2.2.html#Versioning">Versioning</a></li></ol></li></ol></span><h2 id="CQLSyntax">CQL Syntax</h2><h3 id="Preamble">Preamble</h3><p>This document describes the Cassandra Query Language (CQL) version 3. CQL v3 is not backward compatible with CQL v2 and differs from it in numerous ways. Note that this document describes the last version of the languages. However, the <a href="#changes">changes</a> section provides the diff between the different versions of CQL v3.</p><p>CQL v3 offers
  a model very close to SQL in the sense that data is put in <em>tables</em> containing <em>rows</em> of <em>columns</em>. For that reason, when used in this document, these terms (tables, rows and columns) have the same definition than they have in SQL. But please note that as such, they do <strong>not</strong> refer to the concept of rows and columns found in the internal implementation of Cassandra and in the thrift and CQL v2 API.</p><h3 id="Conventions">Conventions</h3><p>To aid in specifying the CQL syntax, we will use the following conventions in this document:</p><ul><li>Language rules will be given in a <a href="http://en.wikipedia.org/wiki/Backus%E2%80%93Naur_Form">BNF</a> -like notation:</li></ul><pre class="syntax"><pre>&lt;start> ::= TERMINAL &lt;non-terminal1> &lt;non-terminal1>
+<?xml version='1.0' encoding='utf-8' ?><!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"><html xmlns="http://www.w3.org/1999/xhtml"><head><meta http-equiv="Content-Type" content="text/html; charset=utf-8"/><title>CQL</title></head><body><p><link rel="StyleSheet" href="CQL.css" type="text/css" media="screen"></p><h1 id="CassandraQueryLanguageCQLv3.3.1">Cassandra Query Language (CQL) v3.3.1</h1><span id="tableOfContents"><ol style="list-style: none;"><li><a href="CQL.html#CassandraQueryLanguageCQLv3.3.1">Cassandra Query Language (CQL) v3.3.1</a><ol style="list-style: none;"><li><a href="CQL.html#CQLSyntax">CQL Syntax</a><ol style="list-style: none;"><li><a href="CQL.html#Preamble">Preamble</a></li><li><a href="CQL.html#Conventions">Conventions</a></li><li><a href="CQL.html#identifiers">Identifiers and keywords</a></li><li><a href="CQL.html#constants">Constants</a></li><li><a href="CQL.html#Comments">Comments</a></l
 i><li><a href="CQL.html#statements">Statements</a></li><li><a href="CQL.html#preparedStatement">Prepared Statement</a></li></ol></li><li><a href="CQL.html#dataDefinition">Data Definition</a><ol style="list-style: none;"><li><a href="CQL.html#createKeyspaceStmt">CREATE KEYSPACE</a></li><li><a href="CQL.html#useStmt">USE</a></li><li><a href="CQL.html#alterKeyspaceStmt">ALTER KEYSPACE</a></li><li><a href="CQL.html#dropKeyspaceStmt">DROP KEYSPACE</a></li><li><a href="CQL.html#createTableStmt">CREATE TABLE</a></li><li><a href="CQL.html#alterTableStmt">ALTER TABLE</a></li><li><a href="CQL.html#dropTableStmt">DROP TABLE</a></li><li><a href="CQL.html#truncateStmt">TRUNCATE</a></li><li><a href="CQL.html#createIndexStmt">CREATE INDEX</a></li><li><a href="CQL.html#dropIndexStmt">DROP INDEX</a></li><li><a href="CQL.html#createTypeStmt">CREATE TYPE</a></li><li><a href="CQL.html#alterTypeStmt">ALTER TYPE</a></li><li><a href="CQL.html#dropTypeStmt">DROP TYPE</a></li><li><a href="CQL.html#createTri
 ggerStmt">CREATE TRIGGER</a></li><li><a href="CQL.html#dropTriggerStmt">DROP TRIGGER</a></li><li><a href="CQL.html#createFunctionStmt">CREATE FUNCTION</a></li><li><a href="CQL.html#dropFunctionStmt">DROP FUNCTION</a></li><li><a href="CQL.html#createAggregateStmt">CREATE AGGREGATE</a></li><li><a href="CQL.html#dropAggregateStmt">DROP AGGREGATE</a></li></ol></li><li><a href="CQL.html#dataManipulation">Data Manipulation</a><ol style="list-style: none;"><li><a href="CQL.html#insertStmt">INSERT</a></li><li><a href="CQL.html#updateStmt">UPDATE</a></li><li><a href="CQL.html#deleteStmt">DELETE</a></li><li><a href="CQL.html#batchStmt">BATCH</a></li></ol></li><li><a href="CQL.html#queries">Queries</a><ol style="list-style: none;"><li><a href="CQL.html#selectStmt">SELECT</a></li></ol></li><li><a href="CQL.html#databaseRoles">Database Roles</a><ol style="list-style: none;"><li><a href="CQL.html#createRoleStmt">CREATE ROLE</a></li><li><a href="CQL.html#alterRoleStmt">ALTER ROLE</a></li><li><a hr
 ef="CQL.html#dropRoleStmt">DROP ROLE</a></li><li><a href="CQL.html#grantRoleStmt">GRANT ROLE</a></li><li><a href="CQL.html#revokeRoleStmt">REVOKE ROLE</a></li><li><a href="CQL.html#createUserStmt">CREATE USER </a></li><li><a href="CQL.html#alterUserStmt">ALTER USER </a></li><li><a href="CQL.html#dropUserStmt">DROP USER </a></li><li><a href="CQL.html#listUsersStmt">LIST USERS</a></li></ol></li><li><a href="CQL.html#dataControl">Data Control</a><ol style="list-style: none;"><li><a href="CQL.html#permissions">Permissions </a></li><li><a href="CQL.html#grantPermissionsStmt">GRANT PERMISSION</a></li><li><a href="CQL.html#revokePermissionsStmt">REVOKE PERMISSION</a></li></ol></li><li><a href="CQL.html#types">Data Types</a><ol style="list-style: none;"><li><a href="CQL.html#usingtimestamps">Working with timestamps</a></li><li><a href="CQL.html#usingdates">Working with dates</a></li><li><a href="CQL.html#usingtime">Working with time</a></li><li><a href="CQL.html#counters">Counters</a></li><
 li><a href="CQL.html#collections">Working with collections</a></li></ol></li><li><a href="CQL.html#functions">Functions</a><ol style="list-style: none;"><li><a href="CQL.html#tokenFun">Token</a></li><li><a href="CQL.html#uuidFun">Uuid</a></li><li><a href="CQL.html#timeuuidFun">Timeuuid functions</a></li><li><a href="CQL.html#timeFun">Time conversion functions</a></li><li><a href="CQL.html#blobFun">Blob conversion functions</a></li></ol></li><li><a href="CQL.html#aggregates">Aggregates</a><ol style="list-style: none;"><li><a href="CQL.html#countFct">Count</a></li><li><a href="CQL.html#maxMinFcts">Max and Min</a></li><li><a href="CQL.html#sumFct">Sum</a></li><li><a href="CQL.html#avgFct">Avg</a></li></ol></li><li><a href="CQL.html#udfs">User-Defined Functions</a></li><li><a href="CQL.html#udas">User-Defined Aggregates</a></li><li><a href="CQL.html#json">JSON Support</a><ol style="list-style: none;"><li><a href="CQL.html#selectJson">SELECT JSON</a></li><li><a href="CQL.html#insertJson"
 >INSERT JSON</a></li><li><a href="CQL.html#jsonEncoding">JSON Encoding of Cassandra Data Types</a></li><li><a href="CQL.html#fromJson">The fromJson() Function</a></li><li><a href="CQL.html#toJson">The toJson() Function</a></li></ol></li><li><a href="CQL.html#appendixA">Appendix A: CQL Keywords</a></li><li><a href="CQL.html#appendixB">Appendix B: CQL Reserved Types</a></li><li><a href="CQL.html#changes">Changes</a><ol style="list-style: none;"><li><a href="CQL.html#a3.3.1">3.3.1</a></li><li><a href="CQL.html#a3.3.0">3.3.0</a></li><li><a href="CQL.html#a3.2.0">3.2.0</a></li><li><a href="CQL.html#a3.1.7">3.1.7</a></li><li><a href="CQL.html#a3.1.6">3.1.6</a></li><li><a href="CQL.html#a3.1.5">3.1.5</a></li><li><a href="CQL.html#a3.1.4">3.1.4</a></li><li><a href="CQL.html#a3.1.3">3.1.3</a></li><li><a href="CQL.html#a3.1.2">3.1.2</a></li><li><a href="CQL.html#a3.1.1">3.1.1</a></li><li><a href="CQL.html#a3.1.0">3.1.0</a></li><li><a href="CQL.html#a3.0.5">3.0.5</a></li><li><a href="CQL.html#
3.0.4</a>">
 a3.0.4">3.0.4</a></li><li><a href="CQL.html#a3.0.3">3.0.3</a></li><li><a href="CQL.html#a3.0.2">3.0.2</a></li><li><a href="CQL.html#a3.0.1">3.0.1</a></li></ol></li><li><a href="CQL.html#Versioning">Versioning</a></li></ol></li></ol></span><h2 id="CQLSyntax">CQL Syntax</h2><h3 id="Preamble">Preamble</h3><p>This document describes the Cassandra Query Language (CQL) version 3. CQL v3 is not backward compatible with CQL v2 and differs from it in numerous ways. Note that this document describes the latest version of the language. However, the <a href="#changes">changes</a> section provides the diff between the different versions of CQL v3.</p><p>CQL v3 offers a model very close to SQL in the sense that data is put in <em>tables</em> containing <em>rows</em> of <em>columns</em>. For that reason, when used in this document, these terms (tables, rows and columns) have the same definition as they have in SQL. But please note that as such, they do <strong>not</strong> refer to the concept of
  rows and columns found in the internal implementation of Cassandra and in the thrift and CQL v2 API.</p><h3 id="Conventions">Conventions</h3><p>To aid in specifying the CQL syntax, we will use the following conventions in this document:</p><ul><li>Language rules will be given in a <a href="http://en.wikipedia.org/wiki/Backus%E2%80%93Naur_Form">BNF</a> -like notation:</li></ul><pre class="syntax"><pre>&lt;start> ::= TERMINAL &lt;non-terminal1> &lt;non-terminal1>
 </pre></pre><ul><li>Nonterminal symbols will have <code>&lt;angle brackets></code>.</li><li>As additional shortcut notations to BNF, we&#8217;ll use traditional regular expression&#8217;s symbols (<code>?</code>, <code>+</code> and <code>*</code>) to signify that a given symbol is optional and/or can be repeated. We&#8217;ll also allow parentheses to group symbols and the <code>[&lt;characters>]</code> notation to represent any one of <code>&lt;characters></code>.</li><li>The grammar is provided for documentation purposes and leaves some minor details out. For instance, the last column definition in a <code>CREATE TABLE</code> statement is optional but supported if present even though the provided grammar in this document suggests it is not supported. </li><li>Sample code will be provided in a code block:</li></ul><pre class="sample"><pre>SELECT sample_usage FROM cql;
 </pre></pre><ul><li>References to keywords or pieces of CQL code in running text will be shown in a <code>fixed-width font</code>.</li></ul><h3 id="identifiers">Identifiers and keywords</h3><p>The CQL language uses <em>identifiers</em> (or <em>names</em>) to identify tables, columns and other objects. An identifier is a token matching the regular expression <code>[a-zA-Z]</code><code>[a-zA-Z0-9_]</code><code>*</code>.</p><p>A number of such identifiers, like <code>SELECT</code> or <code>WITH</code>, are <em>keywords</em>. They have a fixed meaning for the language and most are reserved. The list of those keywords can be found in <a href="#appendixA">Appendix A</a>.</p><p>Identifiers and (unquoted) keywords are case insensitive. Thus <code>SELECT</code> is the same as <code>select</code> or <code>sElEcT</code>, and <code>myId</code> is the same as <code>myid</code> or <code>MYID</code> for instance. A convention often used (in particular by the samples of this documentation) is t
 o use upper case for keywords and lower case for other identifiers.</p><p>There is a second kind of identifiers called <em>quoted identifiers</em> defined by enclosing an arbitrary sequence of characters in double-quotes(<code>"</code>). Quoted identifiers are never keywords. Thus <code>"select"</code> is not a reserved keyword and can be used to refer to a column, while <code>select</code> would raise a parse error. Also, contrarily to unquoted identifiers and keywords, quoted identifiers are case sensitive (<code>"My Quoted Id"</code> is <em>different</em> from <code>"my quoted id"</code>). A fully lowercase quoted identifier that matches <code>[a-zA-Z]</code><code>[a-zA-Z0-9_]</code><code>*</code> is equivalent to the unquoted identifier obtained by removing the double-quote (so <code>"myid"</code> is equivalent to <code>myid</code> and to <code>myId</code> but different from <code>"myId"</code>). Inside a quoted identifier, the double-quote character can be repeated to escape it
 , so <code>"foo "" bar"</code> is a valid identifier.</p><h3 id="constants">Constants</h3><p>CQL defines the following kinds of <em>constants</em>: strings, integers, floats, booleans, uuids and blobs:</p><ul><li>A string constant is an arbitrary sequence of characters enclosed by single-quote(<code>'</code>). One can include a single-quote in a string by repeating it, e.g. <code>'It''s raining today'</code>. Those are not to be confused with quoted identifiers that use double-quotes.</li><li>An integer constant is defined by <code>'-'?[0-9]+</code>.</li><li>A float constant is defined by <code>'-'?[0-9]+('.'[0-9]*)?([eE][+-]?[0-9]+)?</code>. On top of that, <code>NaN</code> and <code>Infinity</code> are also float constants.</li><li>A boolean constant is either <code>true</code> or <code>false</code> up to case-insensitivity (i.e. <code>True</code> is a valid boolean constant).</li><li>A <a href="http://en.wikipedia.org/wiki/Universally_unique_identifier">UUID</a> constan
 t is defined by <code>hex{8}-hex{4}-hex{4}-hex{4}-hex{12}</code> where <code>hex</code> is an hexadecimal character, e.g. <code>[0-9a-fA-F]</code> and <code>{4}</code> is the number of such characters.</li><li>A blob constant is an hexadecimal number defined by <code>0[xX](hex)+</code> where <code>hex</code> is an hexadecimal character, e.g. <code>[0-9a-fA-F]</code>.</li></ul><p>For how these constants are typed, see the <a href="#types">data types section</a>.</p><h3 id="Comments">Comments</h3><p>A comment in CQL is a line beginning with either double dashes (<code>--</code>) or a double slash (<code>//</code>).</p><p>Multi-line comments are also supported through enclosure within <code>/*</code> and <code>*/</code> (but nesting is not supported).</p><pre class="sample"><pre>-- This is a comment
 // This is a comment too
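
As a quick illustration of the identifier, constant and comment rules covered in this hunk, the following sketch is valid CQL; the table and column names are invented for this example and do not appear in the specification:

    -- "User Profiles" is a quoted identifier: case sensitive and never a keyword
    CREATE TABLE "User Profiles" (
        userid int PRIMARY KEY,
        "select" text,   -- quoting lets a reserved keyword be used as a column name
        note text
    );
    /* a single-quote inside a string constant is escaped by doubling it */
    INSERT INTO "User Profiles" (userid, "select", note)
    VALUES (42, 'chosen', 'It''s raining today');
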
@@ -104,7 +104,7 @@ CREATE TABLE timeline (
 INSERT INTO test(pk, t, v, s) VALUES (0, 0, 'val0', 'static0');
 INSERT INTO test(pk, t, v, s) VALUES (0, 1, 'val1', 'static1');
 SELECT * FROM test WHERE pk=0 AND t=0;
-</pre></pre><p>the last query will return <code>'static1'</code> as value for <code>s</code>, since <code>s</code> is static and thus the 2nd insertion modified this &#8220;shared&#8221; value. Note however that static columns are only static within a given partition, and if in the example above both rows where from different partitions (i.e. if they had different value for <code>pk</code>), then the 2nd insertion would not have modified the value of <code>s</code> for the first row.</p><p>A few restrictions applies to when static columns are allowed:</p><ul><li>tables with the <code>COMPACT STORAGE</code> option (see below) cannot have them</li><li>a table without clustering columns cannot have static columns (in a table without clustering columns, every partition has only one row, and so every column is inherently static).</li><li>only non <code>PRIMARY KEY</code> columns can be static</li></ul><h4 id="createTableOptions"><code>&lt;option></code></h4><p>The <code>CREATE TABLE</cod
 e> statement supports a number of options that controls the configuration of a new table. These options can be specified after the <code>WITH</code> keyword.</p><p>The first of these option is <code>COMPACT STORAGE</code>. This option is mainly targeted towards backward compatibility for definitions created before CQL3 (see <a href="http://www.datastax.com/dev/blog/thrift-to-cql3">www.datastax.com/dev/blog/thrift-to-cql3</a> for more details).  The option also provides a slightly more compact layout of data on disk but at the price of diminished flexibility and extensibility for the table.  Most notably, <code>COMPACT STORAGE</code> tables cannot have collections nor static columns and a <code>COMPACT STORAGE</code> table with at least one clustering column supports exactly one (as in not 0 nor more than 1) column not part of the <code>PRIMARY KEY</code> definition (which imply in particular that you cannot add nor remove columns after creation). For those reasons, <code>COMPACT STO
 RAGE</code> is not recommended outside of the backward compatibility reason evoked above.</p><p>Another option is <code>CLUSTERING ORDER</code>. It allows to define the ordering of rows on disk. It takes the list of the clustering column names with, for each of them, the on-disk order (Ascending or descending). Note that this option affects <a href="#selectOrderBy">what <code>ORDER BY</code> are allowed during <code>SELECT</code></a>.</p><p>Table creation supports the following other <code>&lt;property></code>:</p><table><tr><th>option                    </th><th>kind   </th><th>default   </th><th>description</th></tr><tr><td><code>comment</code>                    </td><td><em>simple</em> </td><td>none        </td><td>A free-form, human-readable comment.</td></tr><tr><td><code>read_repair_chance</code>         </td><td><em>simple</em> </td><td>0.1         </td><td>The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) for the purpos
 e of read repairs.</td></tr><tr><td><code>dclocal_read_repair_chance</code> </td><td><em>simple</em> </td><td>0           </td><td>The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) belonging to the same data center than the read coordinator for the purpose of read repairs.</td></tr><tr><td><code>gc_grace_seconds</code>           </td><td><em>simple</em> </td><td>864000      </td><td>Time to wait before garbage collecting tombstones (deletion markers).</td></tr><tr><td><code>bloom_filter_fp_chance</code>     </td><td><em>simple</em> </td><td>0.00075     </td><td>The target probability of false positive of the sstable bloom filters. Said bloom filters will be sized to provide the provided probability (thus lowering this value impact the size of bloom filters in-memory and on-disk)</td></tr><tr><td><code>compaction</code>                 </td><td><em>map</em>    </td><td><em>see below</em> </td><td>The compaction options to use, se
 e below.</td></tr><tr><td><code>compression</code>                </td><td><em>map</em>    </td><td><em>see below</em> </td><td>Compression options, see below. </td></tr><tr><td><code>caching</code>                    </td><td><em>simple</em> </td><td>keys_only   </td><td>Whether to cache keys (&#8220;key cache&#8221;) and/or rows (&#8220;row cache&#8221;) for this table. Valid values are: <code>all</code>, <code>keys_only</code>, <code>rows_only</code> and <code>none</code>. </td></tr><tr><td><code>default_time_to_live</code>       </td><td><em>simple</em> </td><td>0           </td><td>The default expiration time (&#8220;TTL&#8221;) in seconds for a table.</td></tr></table><h4 id="compactionOptions"><code>compaction</code> options</h4><p>The <code>compaction</code> property must at least define the <code>'class'</code> sub-option, that defines the compaction strategy class to use. The default supported class are <code>'SizeTieredCompactionStrategy'</code> and <code>'LeveledCompacti
 onStrategy'</code>. Custom strategy can be provided by specifying the full class name as a <a href="#constants">string constant</a>. The rest of the sub-options depends on the chosen class. The sub-options supported by the default classes are:</p><table><tr><th>option                         </th><th>supported compaction strategy </th><th>default </th><th>description </th></tr><tr><td><code>enabled</code>                        </td><td><em>all</em>                           </td><td>true      </td><td>A boolean denoting whether compaction should be enabled or not.</td></tr><tr><td><code>tombstone_threshold</code>            </td><td><em>all</em>                           </td><td>0.2       </td><td>A ratio such that if a sstable has more than this ratio of gcable tombstones over all contained columns, the sstable will be compacted (with no other sstables) for the purpose of purging those tombstones. </td></tr><tr><td><code>tombstone_compaction_interval</code>  </td><td><em>all</em>
                            </td><td>1 day     </td><td>The minimum time to wait after an sstable creation time before considering it for &#8220;tombstone compaction&#8221;, where &#8220;tombstone compaction&#8221; is the compaction triggered if the sstable has more gcable tombstones than <code>tombstone_threshold</code>. </td></tr><tr><td><code>unchecked_tombstone_compaction</code> </td><td><em>all</em>                           </td><td>false    </td><td>Setting this to true enables more aggressive tombstone compactions &#8211; single sstable tombstone compactions will run without checking how likely it is that they will be successful. </td></tr><tr><td><code>min_sstable_size</code>               </td><td>SizeTieredCompactionStrategy    </td><td>50MB      </td><td>The size tiered strategy groups SSTables to compact in buckets. A bucket groups SSTables that differs from less than 50% in size.  However, for small sizes, this would result in a bucketing that is too fine grained. <code
 >min_sstable_size</code> defines a size threshold (in bytes) below which all SSTables belong to one unique bucket</td></tr><tr><td><code>min_threshold</code>                  </td><td>SizeTieredCompactionStrategy    </td><td>4         </td><td>Minimum number of SSTables needed to start a minor compaction.</td></tr><tr><td><code>max_threshold</code>                  </td><td>SizeTieredCompactionStrategy    </td><td>32        </td><td>Maximum number of SSTables processed by one minor compaction.</td></tr><tr><td><code>bucket_low</code>                     </td><td>SizeTieredCompactionStrategy    </td><td>0.5       </td><td>Size tiered consider sstables to be within the same bucket if their size is within [average_size * <code>bucket_low</code>, average_size * <code>bucket_high</code> ] (i.e the default groups sstable whose sizes diverges by at most 50%)</td></tr><tr><td><code>bucket_high</code>                    </td><td>SizeTieredCompactionStrategy    </td><td>1.5       </td><td>Siz
 e tiered consider sstables to be within the same bucket if their size is within [average_size * <code>bucket_low</code>, average_size * <code>bucket_high</code> ] (i.e the default groups sstable whose sizes diverges by at most 50%).</td></tr><tr><td><code>sstable_size_in_mb</code>             </td><td>LeveledCompactionStrategy       </td><td>5MB       </td><td>The target size (in MB) for sstables in the leveled strategy. Note that while sstable sizes should stay less or equal to <code>sstable_size_in_mb</code>, it is possible to exceptionally have a larger sstable as during compaction, data for a given partition key are never split into 2 sstables</td></tr></table><p>For the <code>compression</code> property, the following default sub-options are available:</p><table><tr><th>option              </th><th>default        </th><th>description </th></tr><tr><td><code>sstable_compression</code> </td><td>LZ4Compressor    </td><td>The compression algorithm to use. Default compressor are: LZ
 4Compressor, SnappyCompressor and DeflateCompressor. Use an empty string (<code>''</code>) to disable compression. Custom compressor can be provided by specifying the full class name as a <a href="#constants">string constant</a>.</td></tr><tr><td><code>chunk_length_kb</code>     </td><td>64KB             </td><td>On disk SSTables are compressed by block (to allow random reads). This defines the size (in KB) of said block. Bigger values may improve the compression rate, but increases the minimum size of data to be read from disk for a read </td></tr><tr><td><code>crc_check_chance</code>    </td><td>1.0              </td><td>When compression is enabled, each compressed block includes a checksum of that block for the purpose of detecting disk bitrot and avoiding the propagation of corruption to other replica. This option defines the probability with which those checksums are checked during read. By default they are always checked. Set to 0 to disable checksum checking and to 0.5 for in
 stance to check them every other read</td></tr></table><h4 id="Otherconsiderations">Other considerations:</h4><ul><li>When <a href="#insertStmt">inserting</a> / <a href="#updateStmt">updating</a> a given row, not all columns needs to be defined (except for those part of the key), and missing columns occupy no space on disk. Furthermore, adding new columns (see &lt;a href=#alterStmt><tt>ALTER TABLE</tt></a>) is a constant time operation. There is thus no need to try to anticipate future usage (or to cry when you haven&#8217;t) when creating a table.</li></ul><h3 id="alterTableStmt">ALTER TABLE</h3><p><i>Syntax:</i></p><pre class="syntax"><pre>&lt;alter-table-stmt> ::= ALTER (TABLE | COLUMNFAMILY) &lt;tablename> &lt;instruction>
+</pre></pre><p>the last query will return <code>'static1'</code> as value for <code>s</code>, since <code>s</code> is static and thus the 2nd insertion modified this &#8220;shared&#8221; value. Note however that static columns are only static within a given partition, and if in the example above both rows were from different partitions (i.e. if they had different values for <code>pk</code>), then the 2nd insertion would not have modified the value of <code>s</code> for the first row.</p><p>A few restrictions apply to when static columns are allowed:</p><ul><li>tables with the <code>COMPACT STORAGE</code> option (see below) cannot have them</li><li>a table without clustering columns cannot have static columns (in a table without clustering columns, every partition has only one row, and so every column is inherently static).</li><li>only non <code>PRIMARY KEY</code> columns can be static</li></ul><h4 id="createTableOptions"><code>&lt;option></code></h4><p>The <code>CREATE TABLE</cod
 e> statement supports a number of options that control the configuration of a new table. These options can be specified after the <code>WITH</code> keyword.</p><p>The first of these options is <code>COMPACT STORAGE</code>. This option is mainly targeted towards backward compatibility for definitions created before CQL3 (see <a href="http://www.datastax.com/dev/blog/thrift-to-cql3">www.datastax.com/dev/blog/thrift-to-cql3</a> for more details).  The option also provides a slightly more compact layout of data on disk but at the price of diminished flexibility and extensibility for the table.  Most notably, <code>COMPACT STORAGE</code> tables cannot have collections nor static columns and a <code>COMPACT STORAGE</code> table with at least one clustering column supports exactly one (as in not 0 nor more than 1) column not part of the <code>PRIMARY KEY</code> definition (which implies in particular that you cannot add nor remove columns after creation). For those reasons, <code>COMPACT STO
 RAGE</code> is not recommended outside of the backward compatibility reason evoked above.</p><p>Another option is <code>CLUSTERING ORDER</code>. It allows to define the ordering of rows on disk. It takes the list of the clustering column names with, for each of them, the on-disk order (Ascending or descending). Note that this option affects <a href="#selectOrderBy">what <code>ORDER BY</code> are allowed during <code>SELECT</code></a>.</p><p>Table creation supports the following other <code>&lt;property></code>:</p><table><tr><th>option                    </th><th>kind   </th><th>default   </th><th>description</th></tr><tr><td><code>comment</code>                    </td><td><em>simple</em> </td><td>none        </td><td>A free-form, human-readable comment.</td></tr><tr><td><code>read_repair_chance</code>         </td><td><em>simple</em> </td><td>0.1         </td><td>The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) for the purpos
 e of read repairs.</td></tr><tr><td><code>dclocal_read_repair_chance</code> </td><td><em>simple</em> </td><td>0           </td><td>The probability with which to query extra nodes (e.g. more nodes than required by the consistency level) belonging to the same data center than the read coordinator for the purpose of read repairs.</td></tr><tr><td><code>gc_grace_seconds</code>           </td><td><em>simple</em> </td><td>864000      </td><td>Time to wait before garbage collecting tombstones (deletion markers).</td></tr><tr><td><code>bloom_filter_fp_chance</code>     </td><td><em>simple</em> </td><td>0.00075     </td><td>The target probability of false positive of the sstable bloom filters. Said bloom filters will be sized to provide the provided probability (thus lowering this value impact the size of bloom filters in-memory and on-disk)</td></tr><tr><td><code>default_time_to_live</code>       </td><td><em>simple</em> </td><td>0           </td><td>The default expiration time (&#8220;TTL&
 #8221;) in seconds for a table.</td></tr><tr><td><code>compaction</code>                 </td><td><em>map</em>    </td><td><em>see below</em> </td><td>Compaction options, see <a href="#compactionOptions">below</a>.</td></tr><tr><td><code>compression</code>                </td><td><em>map</em>    </td><td><em>see below</em> </td><td>Compression options, see <a href="#compressionOptions">below</a>.</td></tr><tr><td><code>caching</code>                    </td><td><em>map</em>    </td><td><em>see below</em> </td><td>Caching options, see <a href="#cachingOptions">below</a>.</td></tr></table><h4 id="compactionOptions">Compaction options</h4><p>The <code>compaction</code> property must at least define the <code>'class'</code> sub-option, that defines the compaction strategy class to use. The default supported class are <code>'SizeTieredCompactionStrategy'</code>, <code>'LeveledCompactionStrategy'</code> and <code>'DateTieredCompactionStrategy'</code>. Custom strategy can be provided by sp
 ecifying the full class name as a <a href="#constants">string constant</a>. The rest of the sub-options depends on the chosen class. The sub-options supported by the default classes are:</p><table><tr><th>option                         </th><th>supported compaction strategy </th><th>default    </th><th>description </th></tr><tr><td><code>enabled</code>                        </td><td><em>all</em>                           </td><td>true         </td><td>A boolean denoting whether compaction should be enabled or not.</td></tr><tr><td><code>tombstone_threshold</code>            </td><td><em>all</em>                           </td><td>0.2          </td><td>A ratio such that if a sstable has more than this ratio of gcable tombstones over all contained columns, the sstable will be compacted (with no other sstables) for the purpose of purging those tombstones. </td></tr><tr><td><code>tombstone_compaction_interval</code>  </td><td><em>all</em>                           </td><td>1 day       
  </td><td>The minimum time to wait after an sstable creation time before considering it for &#8220;tombstone compaction&#8221;, where &#8220;tombstone compaction&#8221; is the compaction triggered if the sstable has more gcable tombstones than <code>tombstone_threshold</code>. </td></tr><tr><td><code>unchecked_tombstone_compaction</code> </td><td><em>all</em>                           </td><td>false        </td><td>Setting this to true enables more aggressive tombstone compactions &#8211; single sstable tombstone compactions will run without checking how likely it is that they will be successful. </td></tr><tr><td><code>min_sstable_size</code>               </td><td>SizeTieredCompactionStrategy    </td><td>50MB         </td><td>The size tiered strategy groups SSTables to compact in buckets. A bucket groups SSTables that differs from less than 50% in size.  However, for small sizes, this would result in a bucketing that is too fine grained. <code>min_sstable_size</code> defines a siz
 e threshold (in bytes) below which all SSTables belong to one unique bucket</td></tr><tr><td><code>min_threshold</code>                  </td><td>SizeTieredCompactionStrategy    </td><td>4            </td><td>Minimum number of SSTables needed to start a minor compaction.</td></tr><tr><td><code>max_threshold</code>                  </td><td>SizeTieredCompactionStrategy    </td><td>32           </td><td>Maximum number of SSTables processed by one minor compaction.</td></tr><tr><td><code>bucket_low</code>                     </td><td>SizeTieredCompactionStrategy    </td><td>0.5          </td><td>Size tiered consider sstables to be within the same bucket if their size is within [average_size * <code>bucket_low</code>, average_size * <code>bucket_high</code> ] (i.e the default groups sstable whose sizes diverges by at most 50%)</td></tr><tr><td><code>bucket_high</code>                    </td><td>SizeTieredCompactionStrategy    </td><td>1.5          </td><td>Size tiered consider sstables
  to be within the same bucket if their size is within [average_size * <code>bucket_low</code>, average_size * <code>bucket_high</code> ] (i.e the default groups sstable whose sizes diverges by at most 50%).</td></tr><tr><td><code>sstable_size_in_mb</code>             </td><td>LeveledCompactionStrategy       </td><td>5MB          </td><td>The target size (in MB) for sstables in the leveled strategy. Note that while sstable sizes should stay less or equal to <code>sstable_size_in_mb</code>, it is possible to exceptionally have a larger sstable as during compaction, data for a given partition key are never split into 2 sstables</td></tr><tr><td><code>timestamp_resolution</code>           </td><td>DateTieredCompactionStrategy    </td><td>MICROSECONDS </td><td>The timestamp resolution used when inserting data, could be MILLISECONDS, MICROSECONDS etc (should be understandable by Java TimeUnit)</td></tr><tr><td><code>base_time_seconds</code>              </td><td>DateTieredCompactionStrate
 gy    </td><td>60           </td><td>The base size of the time windows. </td></tr><tr><td><code>max_sstable_age_days</code>           </td><td>DateTieredCompactionStrategy    </td><td>365          </td><td>SSTables only containing data that is older than this will never be compacted. </td></tr></table><h4 id="compressionOptions">Compression options</h4><p>For the <code>compression</code> property, the following sub-options are available:</p><table><tr><th>option              </th><th>default        </th><th>description </th></tr><tr><td><code>sstable_compression</code> </td><td>LZ4Compressor    </td><td>The compression algorithm to use. Default compressor are: LZ4Compressor, SnappyCompressor and DeflateCompressor. Use an empty string (<code>''</code>) to disable compression. Custom compressor can be provided by specifying the full class name as a <a href="#constants">string constant</a>.</td></tr><tr><td><code>chunk_length_kb</code>     </td><td>64KB             </td><td>On disk SST
 ables are compressed by block (to allow random reads). This defines the size (in KB) of said block. Bigger values may improve the compression rate, but increases the minimum size of data to be read from disk for a read </td></tr><tr><td><code>crc_check_chance</code>    </td><td>1.0              </td><td>When compression is enabled, each compressed block includes a checksum of that block for the purpose of detecting disk bitrot and avoiding the propagation of corruption to other replica. This option defines the probability with which those checksums are checked during read. By default they are always checked. Set to 0 to disable checksum checking and to 0.5 for instance to check them every other read</td></tr></table><h4 id="cachingOptions">Caching options</h4><p>For the <code>caching</code> property, the following sub-options are available:</p><table><tr><th>option              </th><th>default        </th><th>description </th></tr><tr><td><code>keys</code>                 </td><td>
 ALL   </td><td>Whether to cache keys (&#8220;key cache&#8221;) for this table. Valid values are: <code>ALL</code> and <code>NONE</code>.</td></tr><tr><td><code>rows_per_partition</code>   </td><td>NONE   </td><td>The number of rows to cache per partition (&#8220;row cache&#8221;). If an integer <code>n</code> is specified, the first <code>n</code> queried rows of a partition will be cached. Other possible options are <code>ALL</code>, to cache all rows of a queried partition, or <code>NONE</code> to disable row caching.</td></tr></table><h4 id="Otherconsiderations">Other considerations:</h4><ul><li>When <a href="#insertStmt">inserting</a> / <a href="#updateStmt">updating</a> a given row, not all columns need to be defined (except for those part of the key), and missing columns occupy no space on disk. Furthermore, adding new columns (see &lt;a href=#alterStmt><tt>ALTER TABLE</tt></a>) is a constant time operation. There is thus no need to try to anticipate future usage (or to cry w
 hen you haven&#8217;t) when creating a table.</li></ul><h3 id="alterTableStmt">ALTER TABLE</h3><p><i>Syntax:</i></p><pre class="syntax"><pre>&lt;alter-table-stmt> ::= ALTER (TABLE | COLUMNFAMILY) &lt;tablename> &lt;instruction>
 
 &lt;instruction> ::= ALTER &lt;identifier> TYPE &lt;type>
                 | ADD   &lt;identifier> &lt;type>
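
The compaction, compression and caching maps documented in the option tables above are supplied through WITH at table creation time (or later via ALTER TABLE). A minimal sketch, with an invented table name and illustrative sub-option values:

    CREATE TABLE events (
        id      timeuuid PRIMARY KEY,
        payload text
    ) WITH compaction = { 'class' : 'LeveledCompactionStrategy',
                          'sstable_size_in_mb' : 160 }
      AND compression = { 'sstable_compression' : 'LZ4Compressor',
                          'chunk_length_kb' : 64 }
      AND caching = { 'keys' : 'ALL', 'rows_per_partition' : '100' }
      AND default_time_to_live = 86400;
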
@@ -121,7 +121,7 @@ WITH comment = 'A most excellent and use
  AND read_repair_chance = 0.2;
 </pre></pre><p><br/>The <code>ALTER</code> statement is used to manipulate table definitions. It allows for adding new columns, dropping existing ones, changing the type of existing columns, or updating the table options. As with table creation, <code>ALTER COLUMNFAMILY</code> is allowed as an alias for <code>ALTER TABLE</code>.</p><p>The <code>&lt;tablename></code> is the table name optionally preceded by the keyspace name.  The <code>&lt;instruction></code> defines the alteration to perform:</p><ul><li><code>ALTER</code>: Update the type of a given defined column. Note that the type of the <a href="#createTablepartitionClustering">clustering columns</a> cannot be modified as it induces the on-disk ordering of rows. Columns on which a <a href="#createIndexStmt">secondary index</a> is defined have the same restriction. Other columns are free from those restrictions (no validation of existing data is performed), but it is usually a bad idea to change the type to a non-compatible one,
  unless no data have been inserted for that column yet, as this could confuse CQL drivers/tools.</li><li><code>ADD</code>: Adds a new column to the table. The <code>&lt;identifier></code> for the new column must not conflict with an existing column. Moreover, columns cannot be added to tables defined with the <code>COMPACT STORAGE</code> option.</li><li><code>DROP</code>: Removes a column from the table. Dropped columns will immediately become unavailable in the queries and will not be included in compacted sstables in the future. If a column is readded, queries won&#8217;t return values written before the column was last dropped. It is assumed that timestamps represent actual time, so if this is not your case, you should NOT readd previously dropped columns. Columns can&#8217;t be dropped from tables defined with the <code>COMPACT STORAGE</code> option.</li><li><code>WITH</code>: Allows to update the options of the table. The <a href="#createTableOptions">supported <code>&lt;option
 ></code></a> (and syntax) are the same as for the <code>CREATE TABLE</code> statement except that <code>COMPACT STORAGE</code> is not supported. Note that setting any <code>compaction</code> sub-options has the effect of erasing all previous <code>compaction</code> options, so you  need to re-specify all the sub-options if you want to keep them. The same note applies to the set of <code>compression</code> sub-options.</li></ul><h3 id="dropTableStmt">DROP TABLE</h3><p><i>Syntax:</i></p><pre class="syntax"><pre>&lt;drop-table-stmt> ::= DROP TABLE ( IF EXISTS )? &lt;tablename>
 </pre></pre><p><i>Sample:</i></p><pre class="sample"><pre>DROP TABLE worldSeriesAttendees;
-</pre></pre><p>The <code>DROP TABLE</code> statement results in the immediate, irreversible removal of a table, including all data contained in it. As for table creation, <code>DROP COLUMNFAMILY</code> is allowed as an alias for <code>DROP TABLE</code>.</p><p>If the table does not exist, the statement will return an error, unless <code>IF EXISTS</code> is used in which case the operation is a no-op.</p><h3 id="truncateStmt">TRUNCATE</h3><p><i>Syntax:</i></p><pre class="syntax"><pre>&lt;truncate-stmt> ::= TRUNCATE &lt;tablename>
+</pre></pre><p>The <code>DROP TABLE</code> statement results in the immediate, irreversible removal of a table, including all data contained in it. As for table creation, <code>DROP COLUMNFAMILY</code> is allowed as an alias for <code>DROP TABLE</code>.</p><p>If the table does not exist, the statement will return an error, unless <code>IF EXISTS</code> is used in which case the operation is a no-op.</p><h3 id="truncateStmt">TRUNCATE</h3><p><i>Syntax:</i></p><pre class="syntax"><pre>&lt;truncate-stmt> ::= TRUNCATE ( TABLE | COLUMNFAMILY )? &lt;tablename>
 </pre></pre><p><i>Sample:</i></p><pre class="sample"><pre>TRUNCATE superImportantData;
 </pre></pre><p>The <code>TRUNCATE</code> statement permanently removes all data from a table.</p><h3 id="createIndexStmt">CREATE INDEX</h3><p><i>Syntax:</i></p><pre class="syntax"><pre>&lt;create-index-stmt> ::= CREATE ( CUSTOM )? INDEX ( IF NOT EXISTS )? ( &lt;indexname> )?
                             ON &lt;tablename> '(' &lt;index-identifier> ')'
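
Because setting any compaction (or compression) sub-option through WITH erases all previously defined sub-options for that map, an ALTER TABLE that changes a single sub-option has to restate every other sub-option it wants to keep. A sketch with invented names and values:

    ALTER TABLE events ADD source inet;

    -- re-specify all compaction sub-options you want to keep; 'class' is mandatory
    ALTER TABLE events
      WITH compaction = { 'class' : 'SizeTieredCompactionStrategy',
                          'min_threshold' : 6,
                          'max_threshold' : 32 };
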
@@ -212,16 +212,16 @@ DROP FUNCTION afunction ( text );
                             AGGREGATE ( IF NOT EXISTS )?
                             ( &lt;keyspace> '.' )? &lt;aggregate-name>
                             '(' &lt;arg-type> ( ',' &lt;arg-type> )* ')'
-                            SFUNC ( &lt;keyspace> '.' )? &lt;state-functionname>
+                            SFUNC &lt;state-functionname>
                             STYPE &lt;state-type>
-                            ( FINALFUNC ( &lt;keyspace> '.' )? &lt;final-functionname> )?
+                            ( FINALFUNC &lt;final-functionname> )?
                             ( INITCOND &lt;init-cond> )?
 </pre></pre><p><br/><i>Sample:</i></p><pre class="sample"><pre>CREATE AGGREGATE myaggregate ( val text )
   SFUNC myaggregate_state
   STYPE text
   FINALFUNC myaggregate_final
   INITCOND 'foo';
-</pre></pre><p>See the section on <a href="#udas">user-defined aggregates</a> for a complete example.</p><p><code>CREATE AGGREGATE</code> creates or replaces a user-defined aggregate.</p><p><code>CREATE AGGREGATE</code> with the optional <code>OR REPLACE</code> keywords either creates an aggregate or replaces an existing one with the same signature. A <code>CREATE AGGREGATE</code> without <code>OR REPLACE</code> fails if an aggregate with the same signature already exists.</p><p><code>CREATE AGGREGATE</code> with the optional <code>IF NOT EXISTS</code> keywords either creates an aggregate if it does not already exist.</p><p><code>OR REPLACE</code> and <code>IF NOT EXIST</code> cannot be used together.</p><p>Aggregates belong to a keyspace. If no keyspace is specified in <code>&lt;aggregate-name></code>, the current keyspace is used (i.e. the keyspace specified using the <a href="#useStmt"><code>USE</code></a> statement). It is not possible to create a user-defined aggregate in one o
 f the system keyspaces.</p><p>Signatures for user-defined aggregates follow the <a href="#functionSignature">same rules</a> as for user-defined functions.</p><p><code>STYPE</code> defines the type of the state value and must be specified.</p><p>The optional <code>INITCOND</code> defines the initial state value for the aggregate. It defaults to <code>null</code>. A non-@null@ <code>INITCOND</code> must be specified for state functions that are declared with <code>RETURNS NULL ON NULL INPUT</code>.</p><p><code>SFUNC</code> references an existing function to be used as the state modifying function. The type of first argument of the state function must match <code>STYPE</code>. The remaining argument types of the state function must match the argument types of the aggregate function. State is not updated for state functions declared with <code>RETURNS NULL ON NULL INPUT</code> and called with <code>null</code>. Functions from the system keyspace are resolved before functions in the curr
 ent keyspace.</p><p>The optional <code>FINALFUNC</code> is called just before the aggregate result is returned. It must take only one argument with type <code>STYPE</code>. The return type of the <code>FINALFUNC</code> may be a different type. A final function declared with <code>RETURNS NULL ON NULL INPUT</code> means that the aggregate&#8217;s return value will be <code>null</code>, if the last state is <code>null</code>. Functions from the system keyspace are resolved before functions in the current keyspace.</p><p>If no <code>FINALFUNC</code> is defined, the overall return type of the aggregate function is <code>STYPE</code>.  If a <code>FINALFUNC</code> is defined, it is the return type of that function.</p><p>See the section on <a href="#udas">user-defined aggregates</a> for more information.</p><h3 id="dropAggregateStmt">DROP AGGREGATE</h3><p><i>Syntax:</i></p><pre class="syntax"><pre>&lt;drop-aggregate-stmt> ::= DROP AGGREGATE ( IF EXISTS )?
+</pre></pre><p>See the section on <a href="#udas">user-defined aggregates</a> for a complete example.</p><p><code>CREATE AGGREGATE</code> creates or replaces a user-defined aggregate.</p><p><code>CREATE AGGREGATE</code> with the optional <code>OR REPLACE</code> keywords either creates an aggregate or replaces an existing one with the same signature. A <code>CREATE AGGREGATE</code> without <code>OR REPLACE</code> fails if an aggregate with the same signature already exists.</p><p><code>CREATE AGGREGATE</code> with the optional <code>IF NOT EXISTS</code> keywords creates an aggregate if it does not already exist.</p><p><code>OR REPLACE</code> and <code>IF NOT EXISTS</code> cannot be used together.</p><p>Aggregates belong to a keyspace. If no keyspace is specified in <code>&lt;aggregate-name></code>, the current keyspace is used (i.e. the keyspace specified using the <a href="#useStmt"><code>USE</code></a> statement). It is not possible to create a user-defined aggregate in one o
 f the system keyspaces.</p><p>Signatures for user-defined aggregates follow the <a href="#functionSignature">same rules</a> as for user-defined functions.</p><p><code>STYPE</code> defines the type of the state value and must be specified.</p><p>The optional <code>INITCOND</code> defines the initial state value for the aggregate. It defaults to <code>null</code>. A non-<code>null</code> <code>INITCOND</code> must be specified for state functions that are declared with <code>RETURNS NULL ON NULL INPUT</code>.</p><p><code>SFUNC</code> references an existing function to be used as the state modifying function. The type of the first argument of the state function must match <code>STYPE</code>. The remaining argument types of the state function must match the argument types of the aggregate function. State is not updated for state functions declared with <code>RETURNS NULL ON NULL INPUT</code> and called with <code>null</code>.</p><p>The optional <code>FINALFUNC</code> is called just before the aggregat
 e result is returned. It must take only one argument with type <code>STYPE</code>. The return type of the <code>FINALFUNC</code> may be a different type. A final function declared with <code>RETURNS NULL ON NULL INPUT</code> means that the aggregate&#8217;s return value will be <code>null</code>, if the last state is <code>null</code>.</p><p>If no <code>FINALFUNC</code> is defined, the overall return type of the aggregate function is <code>STYPE</code>.  If a <code>FINALFUNC</code> is defined, it is the return type of that function.</p><p>See the section on <a href="#udas">user-defined aggregates</a> for more information.</p><h3 id="dropAggregateStmt">DROP AGGREGATE</h3><p><i>Syntax:</i></p><pre class="syntax"><pre>&lt;drop-aggregate-stmt> ::= DROP AGGREGATE ( IF EXISTS )?
                          ( &lt;keyspace> '.' )? &lt;aggregate-name>
                          ( '(' &lt;arg-type> ( ',' &lt;arg-type> )* ')' )?
 </pre></pre><p></p><p><i>Sample:</i></p><pre class="sample"><pre>DROP AGGREGATE myAggregate;
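
The SFUNC / STYPE / FINALFUNC relationship described in this hunk can be made concrete by sketching the two functions that the CREATE AGGREGATE sample refers to. The function bodies below are invented for illustration only; the constraints taken from the text are that the first argument of the state function has the state type and that the final function takes a single argument of the state type:

    -- hypothetical state function: first argument and return type are the STYPE (text)
    CREATE FUNCTION myaggregate_state ( state text, val text )
        CALLED ON NULL INPUT
        RETURNS text
        LANGUAGE java
        AS 'return state + val;';

    -- hypothetical final function: single argument of the STYPE, may return another type
    CREATE FUNCTION myaggregate_final ( state text )
        CALLED ON NULL INPUT
        RETURNS text
        LANGUAGE java
        AS 'return "[" + state + "]";';
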
@@ -312,7 +312,7 @@ DELETE phone FROM Users WHERE userid IN
   INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c');
   DELETE name FROM users WHERE userid = 'user1';
 APPLY BATCH;
-</pre></pre><p>The <code>BATCH</code> statement group multiple modification statements (insertions/updates and deletions) into a single statement. It serves several purposes:</p><ol><li>It saves network round-trips between the client and the server (and sometimes between the server coordinator and the replicas) when batching multiple updates.</li><li>All updates in a <code>BATCH</code> belonging to a given partition key are performed in isolation.</li><li>By default, all operations in the batch are performed atomically.  See the notes on <a href="#unloggedBatch"><code>UNLOGGED</code></a> for more details.</li></ol><p>Note that:</p><ul><li><code>BATCH</code> statements may only contain <code>UPDATE</code>, <code>INSERT</code> and <code>DELETE</code> statements.</li><li>Batches are <em>not</em> a full analogue for SQL transactions.</li><li>If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp. Due to Cassandra&#8217;s conflict 
 resolution procedure in the case of <a href="http://wiki.apache.org/cassandra/FAQ#clocktie">timestamp ties</a>, operations may be applied in an order that is different from the order they are listed in the <code>BATCH</code> statement. To force a particular operation ordering, you must specify per-operation timestamps.</li></ul><h4 id="unloggedBatch"><code>UNLOGGED</code></h4><p>By default, Cassandra uses a batch log to ensure all operations in a batch are applied atomically. (Note that the operations are still only isolated within a single partition.)</p><p>There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur this penalty, you can tell Cassandra to skip the batchlog with the <code>UNLOGGED</code> option. If the <code>UNLOGGED</code> option is used, operations are only atomic within a single partition.</p><h4 id="counterBatch"><code>COUNTER</code></h4><p>Use the <code>COUNTER</code> option for batched counter updates.
   Unlike other updates in Cassandra, counter updates are not idempotent.</p><h4 id="batchOptions"><code>&lt;option></code></h4><p><code>BATCH</code> supports both the <code>TIMESTAMP</code> option, with similar semantic to the one described in the <a href="#updateOptions"><code>UPDATE</code></a> statement (the timestamp applies to all the statement inside the batch). However, if used, <code>TIMESTAMP</code> <strong>must not</strong> be used in the statements within the batch.</p><h2 id="queries">Queries</h2><h3 id="selectStmt">SELECT</h3><p><i>Syntax:</i></p><pre class="syntax"><pre>&lt;select-stmt> ::= SELECT ( JSON )? &lt;select-clause>
+</pre></pre><p>The <code>BATCH</code> statement groups multiple modification statements (insertions/updates and deletions) into a single statement. It serves several purposes:</p><ol><li>It saves network round-trips between the client and the server (and sometimes between the server coordinator and the replicas) when batching multiple updates.</li><li>All updates in a <code>BATCH</code> belonging to a given partition key are performed in isolation.</li><li>By default, all operations in the batch are performed as <code>LOGGED</code>, to ensure all mutations eventually complete (or none will).  See the notes on <a href="#unloggedBatch"><code>UNLOGGED</code></a> for more details.</li></ol><p>Note that:</p><ul><li><code>BATCH</code> statements may only contain <code>UPDATE</code>, <code>INSERT</code> and <code>DELETE</code> statements.</li><li>Batches are <em>not</em> a full analogue for SQL transactions.</li><li>If a timestamp is not specified for each operation, then all operations will be applied with the same timestamp. Due to Cassandra&#8217;s conflict resolution procedure in the case of <a href="http://wiki.apache.org/cassandra/FAQ#clocktie">timestamp ties</a>, operations may be applied in an order that is different from the order they are listed in the <code>BATCH</code> statement. To force a particular operation ordering, you must specify per-operation timestamps.</li></ul><h4 id="unloggedBatch"><code>UNLOGGED</code></h4><p>By default, Cassandra uses a batch log to ensure all operations in a batch eventually complete or none will (note however that operations are only isolated within a single partition).</p><p>There is a performance penalty for batch atomicity when a batch spans multiple partitions. If you do not want to incur this penalty, you can tell Cassandra to skip the batchlog with the <code>UNLOGGED</code> option. If the <code>UNLOGGED</code> option is used, a failed batch might be left only partly applied.</p><h4 id="counterBatch"><code>COUNTER</code></h4><p>Use the <code>COUNTER</code> option for batched counter updates.  Unlike other updates in Cassandra, counter updates are not idempotent.</p><h4 id="batchOptions"><code>&lt;option></code></h4><p><code>BATCH</code> supports the <code>TIMESTAMP</code> option, with semantics similar to those described in the <a href="#updateOptions"><code>UPDATE</code></a> statement (the timestamp applies to all the statements inside the batch). However, if used, <code>TIMESTAMP</code> <strong>must not</strong> be used in the statements within the batch.</p><h2 id="queries">Queries</h2><h3 id="selectStmt">SELECT</h3><p><i>Syntax:</i></p><pre class="syntax"><pre>&lt;select-stmt> ::= SELECT ( JSON )? &lt;select-clause>
                   FROM &lt;tablename>
                   ( WHERE &lt;where-clause> )?
                   ( ORDER BY &lt;order-by> )?
@@ -516,10 +516,12 @@ REVOKE DESCRIBE ON ALL ROLES FROM role_a
                 | float
                 | inet
                 | int
+                | smallint
                 | text
                 | time
                 | timestamp
                 | timeuuid
+                | tinyint
                 | uuid
                 | varchar
                 | varint
@@ -528,7 +530,7 @@ REVOKE DESCRIBE ON ALL ROLES FROM role_a
                     | set  '&lt;' &lt;native-type> '>'
                     | map  '&lt;' &lt;native-type> ',' &lt;native-type> '>'
 &lt;tuple-type> ::= tuple '&lt;' &lt;type> (',' &lt;type>)* '>'
-</pre></pre><p>Note that the native types are keywords and as such are case-insensitive. They are however not reserved ones.</p><p>The following table gives additional informations on the native data types, and on which kind of <a href="#constants">constants</a> each type supports:</p><table><tr><th>type    </th><th>constants supported</th><th>description</th></tr><tr><td><code>ascii</code>    </td><td>  strings            </td><td>ASCII character string</td></tr><tr><td><code>bigint</code>   </td><td>  integers           </td><td>64-bit signed long</td></tr><tr><td><code>blob</code>     </td><td>  blobs              </td><td>Arbitrary bytes (no validation)</td></tr><tr><td><code>boolean</code>  </td><td>  booleans           </td><td>true or false</td></tr><tr><td><code>counter</code>  </td><td>  integers           </td><td>Counter column (64-bit signed value). See <a href="#counters">Counters</a> for details</td></tr><tr><td><code>date</code>     </td><td>  integers, strings  </td>
 <td>A date (with no corresponding time value).  See <a href="#usingdates">Working with dates</a> below for more information.</td></tr><tr><td><code>decimal</code>  </td><td>  integers, floats   </td><td>Variable-precision decimal</td></tr><tr><td><code>double</code>   </td><td>  integers           </td><td>64-bit IEEE-754 floating point</td></tr><tr><td><code>float</code>    </td><td>  integers, floats   </td><td>32-bit IEEE-754 floating point</td></tr><tr><td><code>inet</code>     </td><td>  strings            </td><td>An IP address. It can be either 4 bytes long (IPv4) or 16 bytes long (IPv6). There is no <code>inet</code> constant, IP address should be inputed as strings</td></tr><tr><td><code>int</code>      </td><td>  integers           </td><td>32-bit signed int</td></tr><tr><td><code>text</code>     </td><td>  strings            </td><td>UTF8 encoded string</td></tr><tr><td><code>time</code>     </td><td>  integers, strings  </td><td>A time with nanosecond precision.  See <a 
 href="#usingtime">Working with time</a> below for more information.</td></tr><tr><td><code>timestamp</code></td><td>  integers, strings  </td><td>A timestamp. Strings constant are allow to input timestamps as dates, see <a href="#usingtimestamps">Working with timestamps</a> below for more information.</td></tr><tr><td><code>timeuuid</code> </td><td>  uuids              </td><td>Type 1 UUID. This is generally used as a &#8220;conflict-free&#8221; timestamp. Also see the <a href="#timeuuidFun">functions on Timeuuid</a></td></tr><tr><td><code>uuid</code>     </td><td>  uuids              </td><td>Type 1 or type 4 UUID</td></tr><tr><td><code>varchar</code>  </td><td>  strings            </td><td>UTF8 encoded string</td></tr><tr><td><code>varint</code>   </td><td>  integers           </td><td>Arbitrary-precision integer</td></tr></table><p>For more information on how to use the collection types, see the <a href="#collections">Working with collections</a> section below.</p><h3 id="usingti
 mestamps">Working with timestamps</h3><p>Values of the <code>timestamp</code> type are encoded as 64-bit signed integers representing a number of milliseconds since the standard base time known as &#8220;the epoch&#8221;: January 1 1970 at 00:00:00 GMT.</p><p>Timestamp can be input in CQL as simple long integers, giving the number of milliseconds since the epoch, as defined above.</p><p>They can also be input as string literals in any of the following ISO 8601 formats, each representing the time and date Mar 2, 2011, at 04:05:00 AM, GMT.:</p><ul><li><code>2011-02-03 04:05+0000</code></li><li><code>2011-02-03 04:05:00+0000</code></li><li><code>2011-02-03 04:05:00.000+0000</code></li><li><code>2011-02-03T04:05+0000</code></li><li><code>2011-02-03T04:05:00+0000</code></li><li><code>2011-02-03T04:05:00.000+0000</code></li></ul><p>The <code>+0000</code> above is an RFC 822 4-digit time zone specification; <code>+0000</code> refers to GMT. US Pacific Standard Time is <code>-0800</code>. T
 he time zone may be omitted if desired&#8212; the date will be interpreted as being in the time zone under which the coordinating Cassandra node is configured.</p><ul><li><code>2011-02-03 04:05</code></li><li><code>2011-02-03 04:05:00</code></li><li><code>2011-02-03 04:05:00.000</code></li><li><code>2011-02-03T04:05</code></li><li><code>2011-02-03T04:05:00</code></li><li><code>2011-02-03T04:05:00.000</code></li></ul><p>There are clear difficulties inherent in relying on the time zone configuration being as expected, though, so it is recommended that the time zone always be specified for timestamps when feasible.</p><p>The time of day may also be omitted, if the date is the only piece that matters:</p><ul><li><code>2011-02-03</code></li><li><code>2011-02-03+0000</code></li></ul><p>In that case, the time of day will default to 00:00:00, in the specified or default time zone.</p><h3 id="usingdates">Working with dates</h3><p>Values of the <code>date</code> type are encoded as 32-bit uns
 igned integers representing a number of days with &#8220;the epoch&#8221; at the center of the range (2^31). Epoch is January 1st, 1970</p><p>A date can be input in CQL as an unsigned integer as defined above.</p><p>They can also be input as string literals in the following format:</p><ul><li><code>2014-01-01</code></li></ul><h3 id="usingtime">Working with time</h3><p>Values of the <code>time</code> type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight.</p><p>A time can be input in CQL as simple long integers, giving the number of nanoseconds since midnight.</p><p>They can also be input as string literals in any of the following formats:</p><ul><li><code>08:12:54</code></li><li><code>08:12:54.123</code></li><li><code>08:12:54.123456</code></li><li><code>08:12:54.123456789</code></li></ul><h3 id="counters">Counters</h3><p>The <code>counter</code> type is used to define <em>counter columns</em>. A counter column is a column whose value is a 6
 4-bit signed integer and on which 2 operations are supported: incrementation and decrementation (see <a href="#updateStmt"><code>UPDATE</code></a> for syntax).  Note the value of a counter cannot be set. A counter doesn&#8217;t exist until first incremented/decremented, and the first incrementation/decrementation is made as if the previous value was 0. Deletion of counter columns is supported but have some limitations (see the <a href="http://wiki.apache.org/cassandra/Counters">Cassandra Wiki</a> for more information).</p><p>The use of the counter type is limited in the following way:</p><ul><li>It cannot be used for column that is part of the <code>PRIMARY KEY</code> of a table.</li><li>A table that contains a counter can only contain counters. In other words, either all the columns of a table outside the <code>PRIMARY KEY</code> have the counter type, or none of them have it.</li></ul><h3 id="collections">Working with collections</h3><h4 id="Noteworthycharacteristics">Noteworthy c
 haracteristics</h4><p>Collections are meant for storing/denormalizing relatively small amount of data. They work well for things like &#8220;the phone numbers of a given user&#8221;, &#8220;labels applied to an email&#8221;, etc. But when items are expected to grow unbounded (&#8220;all the messages sent by a given user&#8221;, &#8220;events registered by a sensor&#8221;, ...), then collections are not appropriate anymore and a specific table (with clustering columns) should be used. Concretely, collections have the following limitations:</p><ul><li>Collections are always read in their entirety (and reading one is not paged internally).</li><li>Collections cannot have more than 65535 elements. More precisely, while it may be possible to insert more than 65535 elements, it is not possible to read more than the 65535 first elements (see <a href="https://issues.apache.org/jira/browse/CASSANDRA-5428">CASSANDRA-5428</a> for details).</li><li>While insertion operations on sets and maps ne
 ver incur a read-before-write internally, some operations on lists do (see the section on lists below for details). It is thus advised to prefer sets over lists when possible.</li></ul><p>Please note that while some of those limitations may or may not be loosen in the future, the general rule that collections are for denormalizing small amount of data is meant to stay.</p><h4 id="map">Maps</h4><p>A <code>map</code> is a <a href="#types">typed</a> set of key-value pairs, where keys are unique. Furthermore, note that the map are internally sorted by their keys and will thus always be returned in that order. To create a column of type <code>map</code>, use the <code>map</code> keyword suffixed with comma-separated key and value types, enclosed in angle brackets.  For example:</p><pre class="sample"><pre>CREATE TABLE users (
+</pre></pre><p>Note that the native types are keywords and as such are case-insensitive. They are however not reserved ones.</p><p>The following table gives additional information on the native data types, and on which kind of <a href="#constants">constants</a> each type supports:</p><table><tr><th>type    </th><th>constants supported</th><th>description</th></tr><tr><td><code>ascii</code>    </td><td>  strings            </td><td>ASCII character string</td></tr><tr><td><code>bigint</code>   </td><td>  integers           </td><td>64-bit signed long</td></tr><tr><td><code>blob</code>     </td><td>  blobs              </td><td>Arbitrary bytes (no validation)</td></tr><tr><td><code>boolean</code>  </td><td>  booleans           </td><td>true or false</td></tr><tr><td><code>counter</code>  </td><td>  integers           </td><td>Counter column (64-bit signed value). See <a href="#counters">Counters</a> for details</td></tr><tr><td><code>date</code>     </td><td>  integers, strings  </td><td>A date (with no corresponding time value).  See <a href="#usingdates">Working with dates</a> below for more information.</td></tr><tr><td><code>decimal</code>  </td><td>  integers, floats   </td><td>Variable-precision decimal</td></tr><tr><td><code>double</code>   </td><td>  integers           </td><td>64-bit IEEE-754 floating point</td></tr><tr><td><code>float</code>    </td><td>  integers, floats   </td><td>32-bit IEEE-754 floating point</td></tr><tr><td><code>inet</code>     </td><td>  strings            </td><td>An IP address. It can be either 4 bytes long (IPv4) or 16 bytes long (IPv6). There is no <code>inet</code> constant; IP addresses should be input as strings</td></tr><tr><td><code>int</code>      </td><td>  integers           </td><td>32-bit signed int</td></tr><tr><td><code>smallint</code> </td><td>  integers           </td><td>16-bit signed int</td></tr><tr><td><code>text</code>     </td><td>  strings            </td><td>UTF8 encoded string</td></tr><tr><td><code>time</code>     </td><td>  integers, strings  </td><td>A time with nanosecond precision.  See <a href="#usingtime">Working with time</a> below for more information.</td></tr><tr><td><code>timestamp</code></td><td>  integers, strings  </td><td>A timestamp. String constants allow timestamps to be input as dates; see <a href="#usingtimestamps">Working with timestamps</a> below for more information.</td></tr><tr><td><code>timeuuid</code> </td><td>  uuids              </td><td>Type 1 UUID. This is generally used as a &#8220;conflict-free&#8221; timestamp. Also see the <a href="#timeuuidFun">functions on Timeuuid</a></td></tr><tr><td><code>tinyint</code>  </td><td>  integers           </td><td>8-bit signed int</td></tr><tr><td><code>uuid</code>     </td><td>  uuids              </td><td>Type 1 or type 4 UUID</td></tr><tr><td><code>varchar</code>  </td><td>  strings            </td><td>UTF8 encoded string</td></tr><tr><td><code>varint</code>   </td><td>  integers           </td><td>Arbitrary-precision integer</td></tr></table><p>For more information on how to use the collection types, see the <a href="#collections">Working with collections</a> section below.</p><h3 id="usingtimestamps">Working with timestamps</h3><p>Values of the <code>timestamp</code> type are encoded as 64-bit signed integers representing a number of milliseconds since the standard base time known as &#8220;the epoch&#8221;: January 1 1970 at 00:00:00 GMT.</p><p>Timestamps can be input in CQL as simple long integers, giving the number of milliseconds since the epoch, as defined above.</p><p>They can also be input as string literals in any of the following ISO 8601 formats, each representing the time and date Feb 3, 2011, at 04:05:00 AM, GMT:</p><ul><li><code>2011-02-03 04:05+0000</code></li><li><code>2011-02-03 04:05:00+0000</code></li><li><code>2011-02-03 04:05:00.000+0000</code></li><li><code>2011-02-03T04:05+0000</code></li><li><code>2011-02-03T04:05:00+0000</code></li><li><code>2011-02-03T04:05:00.000+0000</code></li></ul><p>The <code>+0000</code> above is an RFC 822 4-digit time zone specification; <code>+0000</code> refers to GMT. US Pacific Standard Time is <code>-0800</code>. The time zone may be omitted if desired&#8212; the date will be interpreted as being in the time zone under which the coordinating Cassandra node is configured.</p><ul><li><code>2011-02-03 04:05</code></li><li><code>2011-02-03 04:05:00</code></li><li><code>2011-02-03 04:05:00.000</code></li><li><code>2011-02-03T04:05</code></li><li><code>2011-02-03T04:05:00</code></li><li><code>2011-02-03T04:05:00.000</code></li></ul><p>There are clear difficulties inherent in relying on the time zone configuration being as expected, though, so it is recommended that the time zone always be specified for timestamps when feasible.</p><p>The time of day may also be omitted, if the date is the only piece that matters:</p><ul><li><code>2011-02-03</code></li><li><code>2011-02-03+0000</code></li></ul><p>In that case, the time of day will default to 00:00:00, in the specified or default time zone.</p><h3 id="usingdates">Working with dates</h3><p>Values of the <code>date</code> type are encoded as 32-bit unsigned integers representing a number of days with &#8220;the epoch&#8221; at the center of the range (2^31). The epoch is January 1st, 1970.</p><p>A date can be input in CQL as an unsigned integer as defined above.</p><p>Dates can also be input as string literals in the following format:</p><ul><li><code>2014-01-01</code></li></ul><h3 id="usingtime">Working with time</h3><p>Values of the <code>time</code> type are encoded as 64-bit signed integers representing the number of nanoseconds since midnight.</p><p>A time can be input in CQL as a simple long integer, giving the number of nanoseconds since midnight.</p><p>Times can also be input as string literals in any of the following formats:</p><ul><li><code>08:12:54</code></li><li><code>08:12:54.123</code></li><li><code>08:12:54.123456</code></li><li><code>08:12:54.123456789</code></li></ul><h3 id="counters">Counters</h3><p>The <code>counter</code> type is used to define <em>counter columns</em>. A counter column is a column whose value is a 64-bit signed integer and on which two operations are supported: incrementation and decrementation (see <a href="#updateStmt"><code>UPDATE</code></a> for syntax).  Note that the value of a counter cannot be set. A counter doesn&#8217;t exist until first incremented/decremented, and the first incrementation/decrementation is made as if the previous value was 0. Deletion of counter columns is supported but has some limitations (see the <a href="http://wiki.apache.org/cassandra/Counters">Cassandra Wiki</a> for more information).</p><p>The use of the counter type is limited in the following ways:</p><ul><li>It cannot be used for a column that is part of the <code>PRIMARY KEY</code> of a table.</li><li>A table that contains a counter can only contain counters. In other words, either all the columns of a table outside the <code>PRIMARY KEY</code> have the counter type, or none of them have it.</li></ul><h3 id="collections">Working with collections</h3><h4 id="Noteworthycharacteristics">Noteworthy characteristics</h4><p>Collections are meant for storing/denormalizing relatively small amounts of data. They work well for things like &#8220;the phone numbers of a given user&#8221;, &#8220;labels applied to an email&#8221;, etc. But when items are expected to grow unbounded (&#8220;all the messages sent by a given user&#8221;, &#8220;events registered by a sensor&#8221;, ...), then collections are not appropriate anymore and a specific table (with clustering columns) should be used. Concretely, collections have the following limitations:</p><ul><li>Collections are always read in their entirety (and reading one is not paged internally).</li><li>Collections cannot have more than 65535 elements. More precisely, while it may be possible to insert more than 65535 elements, it is not possible to read more than the first 65535 elements (see <a href="https://issues.apache.org/jira/browse/CASSANDRA-5428">CASSANDRA-5428</a> for details).</li><li>While insertion operations on sets and maps never incur a read-before-write internally, some operations on lists do (see the section on lists below for details). It is thus advised to prefer sets over lists when possible.</li></ul><p>Please note that while some of those limitations may or may not be loosened in the future, the general rule that collections are for denormalizing small amounts of data is meant to stay.</p><h4 id="map">Maps</h4><p>A <code>map</code> is a <a href="#types">typed</a> set of key-value pairs, where keys are unique. Furthermore, note that maps are internally sorted by their keys and will thus always be returned in that order. To create a column of type <code>map</code>, use the <code>map</code> keyword suffixed with comma-separated key and value types, enclosed in angle brackets.  For example:</p><pre class="sample"><pre>CREATE TABLE users (
     id text PRIMARY KEY,
     given text,
     surname text,
@@ -573,7 +575,7 @@ UPDATE plays SET scores = scores - [ 12,
 )
 </pre></pre><p>then the <code>token</code> function will take a single argument of type <code>text</code> (in that case, the partition key is <code>userid</code> (there are no clustering columns, so the partition key is the same as the primary key)), and the return type will be <code>bigint</code>.</p><h3 id="uuidFun">Uuid</h3><p>The <code>uuid</code> function takes no parameters and generates a random type 4 uuid suitable for use in INSERT or SET statements.</p><h3 id="timeuuidFun">Timeuuid functions</h3><h4 id="now"><code>now</code></h4><p>The <code>now</code> function takes no arguments and generates a new unique timeuuid (at the time the statement using it is executed). Note that this method is useful for insertion but is largely nonsensical in <code>WHERE</code> clauses. For instance, a query of the form</p><pre class="sample"><pre>SELECT * FROM myTable WHERE t = now()
 </pre></pre><p>will never return any result by design, since the value returned by <code>now()</code> is guaranteed to be unique.</p><h4 id="minTimeuuidandmaxTimeuuid"><code>minTimeuuid</code> and <code>maxTimeuuid</code></h4><p>The <code>minTimeuuid</code> (resp. <code>maxTimeuuid</code>) function takes a <code>timestamp</code> value <code>t</code> (which can be <a href="#usingtimestamps">either a timestamp or a date string</a>) and returns a <em>fake</em> <code>timeuuid</code> corresponding to the <em>smallest</em> (resp. <em>biggest</em>) possible <code>timeuuid</code> with timestamp <code>t</code>. So for instance:</p><pre class="sample"><pre>SELECT * FROM myTable WHERE t > maxTimeuuid('2013-01-01 00:05+0000') AND t &lt; minTimeuuid('2013-02-02 10:00+0000')
-</pre></pre><p>will select all rows where the <code>timeuuid</code> column <code>t</code> is strictly older than &#8216;2013-01-01 00:05+0000&#8217; but strictly younger than &#8216;2013-02-02 10:00+0000&#8217;.  Please note that <code>t >= maxTimeuuid('2013-01-01 00:05+0000')</code> would still <em>not</em> select a <code>timeuuid</code> generated exactly at &#8216;2013-01-01 00:05+0000&#8217; and is essentially equivalent to <code>t > maxTimeuuid('2013-01-01 00:05+0000')</code>.</p><p><em>Warning</em>: We called the values generated by <code>minTimeuuid</code> and <code>maxTimeuuid</code> <em>fake</em> UUID because they do no respect the Time-Based UUID generation process specified by the <a href="http://www.ietf.org/rfc/rfc4122.txt">RFC 4122</a>. In particular, the value returned by these 2 methods will not be unique. This means you should only use those methods for querying (as in the example above). Inserting the result of those methods is almost certainly <em>a bad idea</em>.<
 /p><h3 id="timeFun">Time conversion functions</h3><p>A number of functions are provided to &#8220;convert&#8221; a <code>timeuuid</code>, a <code>timestamp</code> or a <code>date</code> into another <code>native</code> type.</p><table><tr><th>function name    </th><th>input type   </th><th>description</th></tr><tr><td><code>toDate</code>            </td><td><code>timeuuid</code>      </td><td>Converts the <code>timeuuid</code> argument into a <code>date</code> type</td></tr><tr><td><code>toDate</code>            </td><td><code>timestamp</code>     </td><td>Converts the <code>timestamp</code> argument into a <code>date</code> type</td></tr><tr><td><code>toTimestamp</code>       </td><td><code>timeuuid</code>      </td><td>Converts the <code>timeuuid</code> argument into a <code>timestamp</code> type</td></tr><tr><td><code>toTimestamp</code>       </td><td><code>date</code>          </td><td>Converts the <code>date</code> argument into a <code>timestamp</code> type</td></tr><tr><td><c
 ode>toUnixTimestamp</code>   </td><td><code>timeuuid</code>      </td><td>Converts the <code>timeuuid</code> argument into a <code>bigInt</code> raw value</td></tr><tr><td><code>toUnixTimestamp</code>   </td><td><code>timestamp</code>     </td><td>Converts the <code>timestamp</code> argument into a <code>bigInt</code> raw value</td></tr><tr><td><code>toUnixTimestamp</code>   </td><td><code>date</code>          </td><td>Converts the <code>date</code> argument into a <code>bigInt</code> raw value</td></tr><tr><td><code>dateOf</code>            </td><td><code>timeuuid</code>      </td><td>Similar to <code>toTimestamp(timeuuid)</code> (DEPRECATED)</td></tr><tr><td><code>unixTimestampOf</code>   </td><td><code>timeuuid</code>      </td><td>Similar to <code>toUnixTimestamp(timeuuid)</code> (DEPRECATED)</td></tr></table><h3 id="blobFun">Blob conversion functions</h3><p>A number of functions are provided to &#8220;convert&#8221; the native types into binary data (<code>blob</code>). For eve
 ry <code>&lt;native-type></code> <code>type</code> supported by CQL3 (a notable exceptions is <code>blob</code>, for obvious reasons), the function <code>typeAsBlob</code> takes a argument of type <code>type</code> and return it as a <code>blob</code>.  Conversely, the function <code>blobAsType</code> takes a 64-bit <code>blob</code> argument and convert it to a <code>bigint</code> value.  And so for instance, <code>bigintAsBlob(3)</code> is <code>0x0000000000000003</code> and <code>blobAsBigint(0x0000000000000003)</code> is <code>3</code>.</p><h2 id="aggregates">Aggregates</h2><p>CQL3 distinguishes between built-in aggregates (so called &#8216;native aggregates&#8217;) and <a href="#udas">user-defined aggregates</a>.  CQL3 includes several native aggregates, described below:</p><h3 id="countFct">Count</h3><p>The <code>count</code> function can be used to count the rows returned by a query. Example:</p><pre class="sample"><pre>SELECT COUNT(*) FROM plays;
+</pre></pre><p>will select all rows where the <code>timeuuid</code> column <code>t</code> is strictly older than &#8216;2013-01-01 00:05+0000&#8217; but strictly younger than &#8216;2013-02-02 10:00+0000&#8217;.  Please note that <code>t >= maxTimeuuid('2013-01-01 00:05+0000')</code> would still <em>not</em> select a <code>timeuuid</code> generated exactly at &#8216;2013-01-01 00:05+0000&#8217; and is essentially equivalent to <code>t > maxTimeuuid('2013-01-01 00:05+0000')</code>.</p><p><em>Warning</em>: We call the values generated by <code>minTimeuuid</code> and <code>maxTimeuuid</code> <em>fake</em> UUIDs because they do not respect the Time-Based UUID generation process specified by <a href="http://www.ietf.org/rfc/rfc4122.txt">RFC 4122</a>. In particular, the values returned by these two methods are not unique. This means you should only use those methods for querying (as in the example above). Inserting the result of those methods is almost certainly <em>a bad idea</em>.</p><h3 id="timeFun">Time conversion functions</h3><p>A number of functions are provided to &#8220;convert&#8221; a <code>timeuuid</code>, a <code>timestamp</code> or a <code>date</code> into another <code>native</code> type.</p><table><tr><th>function name    </th><th>input type   </th><th>description</th></tr><tr><td><code>toDate</code>            </td><td><code>timeuuid</code>      </td><td>Converts the <code>timeuuid</code> argument into a <code>date</code> type</td></tr><tr><td><code>toDate</code>            </td><td><code>timestamp</code>     </td><td>Converts the <code>timestamp</code> argument into a <code>date</code> type</td></tr><tr><td><code>toTimestamp</code>       </td><td><code>timeuuid</code>      </td><td>Converts the <code>timeuuid</code> argument into a <code>timestamp</code> type</td></tr><tr><td><code>toTimestamp</code>       </td><td><code>date</code>          </td><td>Converts the <code>date</code> argument into a <code>timestamp</code> type</td></tr><tr><td><code>toUnixTimestamp</code>   </td><td><code>timeuuid</code>      </td><td>Converts the <code>timeuuid</code> argument into a <code>bigint</code> raw value</td></tr><tr><td><code>toUnixTimestamp</code>   </td><td><code>timestamp</code>     </td><td>Converts the <code>timestamp</code> argument into a <code>bigint</code> raw value</td></tr><tr><td><code>toUnixTimestamp</code>   </td><td><code>date</code>          </td><td>Converts the <code>date</code> argument into a <code>bigint</code> raw value</td></tr><tr><td><code>dateOf</code>            </td><td><code>timeuuid</code>      </td><td>Similar to <code>toTimestamp(timeuuid)</code> (DEPRECATED)</td></tr><tr><td><code>unixTimestampOf</code>   </td><td><code>timeuuid</code>      </td><td>Similar to <code>toUnixTimestamp(timeuuid)</code> (DEPRECATED)</td></tr></table><h3 id="blobFun">Blob conversion functions</h3><p>A number of functions are provided to &#8220;convert&#8221; the native types into binary data (<code>blob</code>). For every <code>&lt;native-type></code> <code>type</code> supported by CQL3 (a notable exception is <code>blob</code>, for obvious reasons), the function <code>typeAsBlob</code> takes an argument of type <code>type</code> and returns it as a <code>blob</code>.  Conversely, the function <code>blobAsType</code> takes a <code>blob</code> argument and converts it back to a value of type <code>type</code>.  So for instance, <code>bigintAsBlob(3)</code> is <code>0x0000000000000003</code> and <code>blobAsBigint(0x0000000000000003)</code> is <code>3</code>.</p><h2 id="aggregates">Aggregates</h2><p>Aggregate functions work on a set of rows. They receive values for each row and return one value for the whole set.<br/>If <code>normal</code> columns, <code>scalar functions</code>, <code>UDT</code> fields, <code>writetime</code> or <code>ttl</code> are selected together with aggregate functions, the values returned for them will be the ones of the first row matching the query.</p><p>CQL3 distinguishes between built-in aggregates (so-called &#8216;native aggregates&#8217;) and <a href="#udas">user-defined aggregates</a>. CQL3 includes several native aggregates, described below:</p><h3 id="countFct">Count</h3><p>The <code>count</code> function can be used to count the rows returned by a query. Example:</p><pre class="sample"><pre>SELECT COUNT(*) FROM plays;
 SELECT COUNT(1) FROM plays;
 </pre></pre><p>It can also be used to count the non-null values of a given column. Example:</p><pre class="sample"><pre>SELECT COUNT(scores) FROM plays;
 </pre></pre><h3 id="maxMinFcts">Max and Min</h3><p>The <code>max</code> and <code>min</code> functions can be used to compute the maximum and the minimum value returned by a query for a given column.</p><pre class="sample"><pre>SELECT MIN(players), MAX(players) FROM plays WHERE game = 'quake';
@@ -595,7 +597,7 @@ CREATE FUNCTION fct_using_udt ( udtarg f
   RETURNS text
   LANGUAGE java
   AS $$ return udtarg.getString("txt"); $$;
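 -- Hypothetical usage sketch, not part of the original sample: invoking the function on a
 -- column holding the matching user-defined type; table and column names are illustrative.
 SELECT fct_using_udt(some_udt_column) FROM some_table;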

[... 32 lines stripped ...]