Posted to commits@cassandra.apache.org by jb...@apache.org on 2013/10/06 18:29:19 UTC

[1/6] git commit: Limit CQL prepared statement cache by size instead of count patch by Lyuben Todorov; reviewed by jbellis for CASSANDRA-6107

Updated Branches:
  refs/heads/cassandra-1.2 5b7dd5e62 -> 4784e6717
  refs/heads/cassandra-2.0 27967f95b -> e9b624a73
  refs/heads/trunk 8d9b5fdae -> d34d84223


Limit CQL prepared statement cache by size instead of count
patch by Lyuben Todorov; reviewed by jbellis for CASSANDRA-6107


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/4784e671
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/4784e671
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/4784e671

Branch: refs/heads/cassandra-1.2
Commit: 4784e6717a7217f0d82cbf73d0d3883ec52211d4
Parents: 5b7dd5e
Author: Jonathan Ellis <jb...@apache.org>
Authored: Sun Oct 6 11:27:33 2013 -0500
Committer: Jonathan Ellis <jb...@apache.org>
Committed: Sun Oct 6 11:27:33 2013 -0500

----------------------------------------------------------------------
 CHANGES.txt                                     |  1 +
 .../apache/cassandra/cql3/QueryProcessor.java   | 53 ++++++++++++++++----
 2 files changed, 44 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/4784e671/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 5af4e2e..6cbfa14 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
 1.2.11
+ * Limit CQL prepared statement cache by size instead of count (CASSANDRA-6107)
  * Tracing should log write failure rather than raw exceptions (CASSANDRA-6133)
  * lock access to TM.endpointToHostIdMap (CASSANDRA-6103)
  * Allow estimated memtable size to exceed slab allocator size (CASSANDRA-6078)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4784e671/src/java/org/apache/cassandra/cql3/QueryProcessor.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/QueryProcessor.java b/src/java/org/apache/cassandra/cql3/QueryProcessor.java
index dbc9577..c4e7c44 100644
--- a/src/java/org/apache/cassandra/cql3/QueryProcessor.java
+++ b/src/java/org/apache/cassandra/cql3/QueryProcessor.java
@@ -20,8 +20,12 @@ package org.apache.cassandra.cql3;
 import java.nio.ByteBuffer;
 import java.util.*;
 
+import com.google.common.primitives.Ints;
+
 import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap;
+import com.googlecode.concurrentlinkedhashmap.EntryWeigher;
 import org.antlr.runtime.*;
+import org.github.jamm.MemoryMeter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -41,15 +45,36 @@ public class QueryProcessor
     public static final SemanticVersion CQL_VERSION = new SemanticVersion("3.0.5");
 
     private static final Logger logger = LoggerFactory.getLogger(QueryProcessor.class);
+    private static final MemoryMeter meter = new MemoryMeter();
+    private static final long MAX_CACHE_PREPARED_MEMORY = Runtime.getRuntime().maxMemory() / 256;
+
+    private static EntryWeigher<MD5Digest, CQLStatement> cqlMemoryUsageWeigher = new EntryWeigher<MD5Digest, CQLStatement>()
+    {
+        @Override
+        public int weightOf(MD5Digest key, CQLStatement value)
+        {
+            return Ints.checkedCast(meter.measureDeep(key) + meter.measureDeep(value));
+        }
+    };
 
-    public static final int MAX_CACHE_PREPARED = 100000; // Enough to keep buggy clients from OOM'ing us
-    private static final Map<MD5Digest, CQLStatement> preparedStatements = new ConcurrentLinkedHashMap.Builder<MD5Digest, CQLStatement>()
-                                                                               .maximumWeightedCapacity(MAX_CACHE_PREPARED)
-                                                                               .build();
+    private static final ConcurrentLinkedHashMap<MD5Digest, CQLStatement> preparedStatements = new ConcurrentLinkedHashMap.Builder<MD5Digest, CQLStatement>()
+                                                                                               .maximumWeightedCapacity(MAX_CACHE_PREPARED_MEMORY)
+                                                                                               .weigher(cqlMemoryUsageWeigher)
+                                                                                               .build();
 
-    private static final Map<Integer, CQLStatement> thriftPreparedStatements = new ConcurrentLinkedHashMap.Builder<Integer, CQLStatement>()
-                                                                                   .maximumWeightedCapacity(MAX_CACHE_PREPARED)
-                                                                                   .build();
+    private static EntryWeigher<Integer, CQLStatement> thriftMemoryUsageWeigher = new EntryWeigher<Integer, CQLStatement>()
+    {
+        @Override
+        public int weightOf(Integer key, CQLStatement value)
+        {
+            return Ints.checkedCast(meter.measureDeep(key) + meter.measureDeep(value));
+        }
+    };
+
+    private static final ConcurrentLinkedHashMap<Integer, CQLStatement> thriftPreparedStatements = new ConcurrentLinkedHashMap.Builder<Integer, CQLStatement>()
+                                                                                                   .maximumWeightedCapacity(MAX_CACHE_PREPARED_MEMORY)
+                                                                                                   .weigher(thriftMemoryUsageWeigher)
+                                                                                                   .build();
 
 
     public static CQLStatement getPrepared(MD5Digest id)
@@ -178,10 +203,18 @@ public class QueryProcessor
     }
 
     private static ResultMessage.Prepared storePreparedStatement(String queryString, String keyspace, ParsedStatement.Prepared prepared, boolean forThrift)
+    throws InvalidRequestException
     {
         // Concatenate the current keyspace so we don't mix prepared statements between keyspace (#5352).
         // (if the keyspace is null, queryString has to have a fully-qualified keyspace so it's fine.
         String toHash = keyspace == null ? queryString : keyspace + queryString;
+        long statementSize = meter.measureDeep(prepared.statement);
+        // don't execute the statement if it's bigger than the allowed threshold
+        if (statementSize > MAX_CACHE_PREPARED_MEMORY)
+            throw new InvalidRequestException(String.format("Prepared statement of size %d bytes is larger than allowed maximum of %d bytes.",
+                                                            statementSize,
+                                                            MAX_CACHE_PREPARED_MEMORY));
+
         if (forThrift)
         {
             int statementId = toHash.hashCode();
@@ -194,10 +227,10 @@ public class QueryProcessor
         else
         {
             MD5Digest statementId = MD5Digest.compute(toHash);
-            logger.trace(String.format("Stored prepared statement %s with %d bind markers",
-                                       statementId,
-                                       prepared.statement.getBoundsTerms()));
             preparedStatements.put(statementId, prepared.statement);
+            logger.trace(String.format("Stored prepared statement %s with %d bind markers",
+                         statementId,
+                         prepared.statement.getBoundsTerms()));
             return new ResultMessage.Prepared(statementId, prepared.boundNames);
         }
     }

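For readers skimming the diff above: the core pattern is a weight-bounded LRU map. Instead of capping the number of cached prepared statements, each entry is weighed by its deep memory footprint and the ConcurrentLinkedHashMap evicts least-recently-used entries once the summed weights exceed maximumWeightedCapacity. The standalone sketch below shows the same ConcurrentLinkedHashMap/EntryWeigher wiring outside Cassandra. It is an illustration only: it substitutes a trivial byte-length weigher for jamm's MemoryMeter (which needs the -javaagent:jamm.jar instrumentation agent), and the 64 KiB cap is an arbitrary demo value, not the heap-derived limit the patch uses.

import java.nio.charset.StandardCharsets;

import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap;
import com.googlecode.concurrentlinkedhashmap.EntryWeigher;

public class WeightedCacheSketch
{
    // Illustrative cap: 64 KiB of cached statement text. The patch instead derives
    // its cap from the heap: Runtime.getRuntime().maxMemory() / 256.
    private static final long MAX_WEIGHT_BYTES = 64 * 1024;

    // Stand-in weigher: approximate each entry by the UTF-8 length of key and value.
    // The patch uses jamm's MemoryMeter.measureDeep() here instead.
    private static final EntryWeigher<String, String> byteLengthWeigher = new EntryWeigher<String, String>()
    {
        @Override
        public int weightOf(String key, String value)
        {
            return key.getBytes(StandardCharsets.UTF_8).length
                 + value.getBytes(StandardCharsets.UTF_8).length;
        }
    };

    private static final ConcurrentLinkedHashMap<String, String> cache =
            new ConcurrentLinkedHashMap.Builder<String, String>()
                    .maximumWeightedCapacity(MAX_WEIGHT_BYTES)
                    .weigher(byteLengthWeigher)
                    .build();

    public static void main(String[] args)
    {
        // Insert entries until the summed weights pass the cap; the map then evicts
        // least-recently-used entries rather than growing without bound.
        for (int i = 0; i < 10000; i++)
            cache.put("stmt-" + i, "SELECT * FROM ks.tbl WHERE id = ?  -- variant " + i);

        System.out.printf("entries retained: %d, weighted size: %d bytes%n",
                          cache.size(), cache.weightedSize());
    }
}

Requires only the concurrentlinkedhashmap-lru library on the classpath; the retained-entry count it prints will be well below 10000 because eviction is driven by the weighted size, not the entry count.
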

[6/6] git commit: Merge branch 'cassandra-2.0' into trunk

Posted by jb...@apache.org.
Merge branch 'cassandra-2.0' into trunk


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/d34d8422
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/d34d8422
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/d34d8422

Branch: refs/heads/trunk
Commit: d34d84223379a38c92d9a2ab8fc3c1efd6dcc64d
Parents: 8d9b5fd e9b624a
Author: Jonathan Ellis <jb...@apache.org>
Authored: Sun Oct 6 11:29:13 2013 -0500
Committer: Jonathan Ellis <jb...@apache.org>
Committed: Sun Oct 6 11:29:13 2013 -0500

----------------------------------------------------------------------
 CHANGES.txt                                     |  3 +-
 .../apache/cassandra/cql3/QueryProcessor.java   | 49 ++++++++++++++++----
 2 files changed, 43 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/d34d8422/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index 5256edf,94fa927..4ec387c
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,13 -1,5 +1,14 @@@
 +2.1
 + * Parallelize fetching rows for low-cardinality indexes (CASSANDRA-1337)
 + * change logging from log4j to logback (CASSANDRA-5883)
 + * switch to LZ4 compression for internode communication (CASSANDRA-5887)
 + * Stop using Thrift-generated Index* classes internally (CASSANDRA-5971)
 + * Remove 1.2 network compatibility code (CASSANDRA-5960)
 + * Remove leveled json manifest migration code (CASSANDRA-5996)
 +
 +
  2.0.2
+  * Never return WriteTimeout for CL.ANY (CASSANDRA-6032)
   * Fix race conditions in bulk loader (CASSANDRA-6129)
   * Add configurable metrics reporting (CASSANDRA-4430)
   * drop queries exceeding a configurable number of tombstones (CASSANDRA-6117)


[2/6] git commit: Limit CQL prepared statement cache by size instead of count patch by Lyuben Todorov; reviewed by jbellis for CASSANDRA-6107

Posted by jb...@apache.org.
Limit CQL prepared statement cache by size instead of count
patch by Lyuben Todorov; reviewed by jbellis for CASSANDRA-6107


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/4784e671
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/4784e671
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/4784e671

Branch: refs/heads/cassandra-2.0
Commit: 4784e6717a7217f0d82cbf73d0d3883ec52211d4
Parents: 5b7dd5e
Author: Jonathan Ellis <jb...@apache.org>
Authored: Sun Oct 6 11:27:33 2013 -0500
Committer: Jonathan Ellis <jb...@apache.org>
Committed: Sun Oct 6 11:27:33 2013 -0500

----------------------------------------------------------------------
 CHANGES.txt                                     |  1 +
 .../apache/cassandra/cql3/QueryProcessor.java   | 53 ++++++++++++++++----
 2 files changed, 44 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/4784e671/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 5af4e2e..6cbfa14 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
 1.2.11
+ * Limit CQL prepared statement cache by size instead of count (CASSANDRA-6107)
  * Tracing should log write failure rather than raw exceptions (CASSANDRA-6133)
  * lock access to TM.endpointToHostIdMap (CASSANDRA-6103)
  * Allow estimated memtable size to exceed slab allocator size (CASSANDRA-6078)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4784e671/src/java/org/apache/cassandra/cql3/QueryProcessor.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/QueryProcessor.java b/src/java/org/apache/cassandra/cql3/QueryProcessor.java
index dbc9577..c4e7c44 100644
--- a/src/java/org/apache/cassandra/cql3/QueryProcessor.java
+++ b/src/java/org/apache/cassandra/cql3/QueryProcessor.java
@@ -20,8 +20,12 @@ package org.apache.cassandra.cql3;
 import java.nio.ByteBuffer;
 import java.util.*;
 
+import com.google.common.primitives.Ints;
+
 import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap;
+import com.googlecode.concurrentlinkedhashmap.EntryWeigher;
 import org.antlr.runtime.*;
+import org.github.jamm.MemoryMeter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -41,15 +45,36 @@ public class QueryProcessor
     public static final SemanticVersion CQL_VERSION = new SemanticVersion("3.0.5");
 
     private static final Logger logger = LoggerFactory.getLogger(QueryProcessor.class);
+    private static final MemoryMeter meter = new MemoryMeter();
+    private static final long MAX_CACHE_PREPARED_MEMORY = Runtime.getRuntime().maxMemory() / 256;
+
+    private static EntryWeigher<MD5Digest, CQLStatement> cqlMemoryUsageWeigher = new EntryWeigher<MD5Digest, CQLStatement>()
+    {
+        @Override
+        public int weightOf(MD5Digest key, CQLStatement value)
+        {
+            return Ints.checkedCast(meter.measureDeep(key) + meter.measureDeep(value));
+        }
+    };
 
-    public static final int MAX_CACHE_PREPARED = 100000; // Enough to keep buggy clients from OOM'ing us
-    private static final Map<MD5Digest, CQLStatement> preparedStatements = new ConcurrentLinkedHashMap.Builder<MD5Digest, CQLStatement>()
-                                                                               .maximumWeightedCapacity(MAX_CACHE_PREPARED)
-                                                                               .build();
+    private static final ConcurrentLinkedHashMap<MD5Digest, CQLStatement> preparedStatements = new ConcurrentLinkedHashMap.Builder<MD5Digest, CQLStatement>()
+                                                                                               .maximumWeightedCapacity(MAX_CACHE_PREPARED_MEMORY)
+                                                                                               .weigher(cqlMemoryUsageWeigher)
+                                                                                               .build();
 
-    private static final Map<Integer, CQLStatement> thriftPreparedStatements = new ConcurrentLinkedHashMap.Builder<Integer, CQLStatement>()
-                                                                                   .maximumWeightedCapacity(MAX_CACHE_PREPARED)
-                                                                                   .build();
+    private static EntryWeigher<Integer, CQLStatement> thriftMemoryUsageWeigher = new EntryWeigher<Integer, CQLStatement>()
+    {
+        @Override
+        public int weightOf(Integer key, CQLStatement value)
+        {
+            return Ints.checkedCast(meter.measureDeep(key) + meter.measureDeep(value));
+        }
+    };
+
+    private static final ConcurrentLinkedHashMap<Integer, CQLStatement> thriftPreparedStatements = new ConcurrentLinkedHashMap.Builder<Integer, CQLStatement>()
+                                                                                                   .maximumWeightedCapacity(MAX_CACHE_PREPARED_MEMORY)
+                                                                                                   .weigher(thriftMemoryUsageWeigher)
+                                                                                                   .build();
 
 
     public static CQLStatement getPrepared(MD5Digest id)
@@ -178,10 +203,18 @@ public class QueryProcessor
     }
 
     private static ResultMessage.Prepared storePreparedStatement(String queryString, String keyspace, ParsedStatement.Prepared prepared, boolean forThrift)
+    throws InvalidRequestException
     {
         // Concatenate the current keyspace so we don't mix prepared statements between keyspace (#5352).
         // (if the keyspace is null, queryString has to have a fully-qualified keyspace so it's fine.
         String toHash = keyspace == null ? queryString : keyspace + queryString;
+        long statementSize = meter.measureDeep(prepared.statement);
+        // don't execute the statement if it's bigger than the allowed threshold
+        if (statementSize > MAX_CACHE_PREPARED_MEMORY)
+            throw new InvalidRequestException(String.format("Prepared statement of size %d bytes is larger than allowed maximum of %d bytes.",
+                                                            statementSize,
+                                                            MAX_CACHE_PREPARED_MEMORY));
+
         if (forThrift)
         {
             int statementId = toHash.hashCode();
@@ -194,10 +227,10 @@ public class QueryProcessor
         else
         {
             MD5Digest statementId = MD5Digest.compute(toHash);
-            logger.trace(String.format("Stored prepared statement %s with %d bind markers",
-                                       statementId,
-                                       prepared.statement.getBoundsTerms()));
             preparedStatements.put(statementId, prepared.statement);
+            logger.trace(String.format("Stored prepared statement %s with %d bind markers",
+                         statementId,
+                         prepared.statement.getBoundsTerms()));
             return new ResultMessage.Prepared(statementId, prepared.boundNames);
         }
     }

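A back-of-the-envelope check on the new threshold: MAX_CACHE_PREPARED_MEMORY is 1/256th of the JVM max heap, so a node started with an 8 GiB heap gets a cache budget of 8,589,934,592 / 256 = 33,554,432 bytes (32 MiB), and any single prepared statement measuring larger than that is rejected up front. The sketch below mirrors the guard added to storePreparedStatement(); measureStatement() is a hypothetical stand-in for meter.measureDeep(prepared.statement) (the real deep measurement needs the jamm agent), and IllegalArgumentException stands in for Cassandra's InvalidRequestException.

import java.nio.charset.StandardCharsets;

public final class PreparedCacheLimits
{
    // Same derivation as the patch: 1/256th of the JVM max heap.
    // Example: an 8 GiB heap gives 8,589,934,592 / 256 = 33,554,432 bytes (32 MiB).
    static final long MAX_CACHE_PREPARED_MEMORY = Runtime.getRuntime().maxMemory() / 256;

    // Hypothetical stand-in for jamm's meter.measureDeep(prepared.statement).
    static long measureStatement(Object statement)
    {
        return statement.toString().getBytes(StandardCharsets.UTF_8).length;
    }

    // Mirrors the guard the patch adds: refuse to cache (and to prepare) any single
    // statement larger than the whole cache budget.
    static void checkSize(Object statement)
    {
        long statementSize = measureStatement(statement);
        if (statementSize > MAX_CACHE_PREPARED_MEMORY)
            throw new IllegalArgumentException(String.format(
                    "Prepared statement of size %d bytes is larger than allowed maximum of %d bytes.",
                    statementSize, MAX_CACHE_PREPARED_MEMORY));
    }

    public static void main(String[] args)
    {
        System.out.println("cache budget (bytes): " + MAX_CACHE_PREPARED_MEMORY);
        checkSize("SELECT * FROM ks.tbl WHERE id = ?"); // well under the cap
    }
}
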

[5/6] git commit: merge from 1.2

Posted by jb...@apache.org.
merge from 1.2


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/e9b624a7
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/e9b624a7
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/e9b624a7

Branch: refs/heads/cassandra-2.0
Commit: e9b624a7379b88b024cf9b06b14238699a2ff466
Parents: 27967f9 4784e67
Author: Jonathan Ellis <jb...@apache.org>
Authored: Sun Oct 6 11:29:04 2013 -0500
Committer: Jonathan Ellis <jb...@apache.org>
Committed: Sun Oct 6 11:29:04 2013 -0500

----------------------------------------------------------------------
 CHANGES.txt                                     |  3 +-
 .../apache/cassandra/cql3/QueryProcessor.java   | 49 ++++++++++++++++----
 2 files changed, 43 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/e9b624a7/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index 1590dd0,6cbfa14..94fa927
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,25 -1,5 +1,26 @@@
 -1.2.11
 +2.0.2
++ * Never return WriteTimeout for CL.ANY (CASSANDRA-6032)
 + * Fix race conditions in bulk loader (CASSANDRA-6129)
 + * Add configurable metrics reporting (CASSANDRA-4430)
 + * drop queries exceeding a configurable number of tombstones (CASSANDRA-6117)
 + * Track and persist sstable read activity (CASSANDRA-5515)
 + * Fixes for speculative retry (CASSANDRA-5932)
 + * Improve memory usage of metadata min/max column names (CASSANDRA-6077)
 + * Fix thrift validation refusing row markers on CQL3 tables (CASSANDRA-6081)
 + * Fix insertion of collections with CAS (CASSANDRA-6069)
 + * Correctly send metadata on SELECT COUNT (CASSANDRA-6080)
 + * Track clients' remote addresses in ClientState (CASSANDRA-6070)
 + * Create snapshot dir if it does not exist when migrating
 +   leveled manifest (CASSANDRA-6093)
 + * make sequential nodetool repair the default (CASSANDRA-5950)
 + * Add more hooks for compaction strategy implementations (CASSANDRA-6111)
 + * Fix potential NPE on composite 2ndary indexes (CASSANDRA-6098)
 + * Delete can potentially be skipped in batch (CASSANDRA-6115)
 + * Allow alter keyspace on system_traces (CASSANDRA-6016)
 + * Disallow empty column names in cql (CASSANDRA-6136)
 + * Use Java7 file-handling APIs and fix file moving on Windows (CASSANDRA-5383)
 +Merged from 1.2:
-  * Never return WriteTimeout for CL.ANY (CASSANDRA-6032)
+  * Limit CQL prepared statement cache by size instead of count (CASSANDRA-6107)
   * Tracing should log write failure rather than raw exceptions (CASSANDRA-6133)
   * lock access to TM.endpointToHostIdMap (CASSANDRA-6103)
   * Allow estimated memtable size to exceed slab allocator size (CASSANDRA-6078)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e9b624a7/src/java/org/apache/cassandra/cql3/QueryProcessor.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/cql3/QueryProcessor.java
index 52396e7,c4e7c44..72351d5
--- a/src/java/org/apache/cassandra/cql3/QueryProcessor.java
+++ b/src/java/org/apache/cassandra/cql3/QueryProcessor.java
@@@ -38,18 -42,39 +42,39 @@@ import org.apache.cassandra.utils.Seman
  
  public class QueryProcessor
  {
 -    public static final SemanticVersion CQL_VERSION = new SemanticVersion("3.0.5");
 +    public static final SemanticVersion CQL_VERSION = new SemanticVersion("3.1.1");
  
      private static final Logger logger = LoggerFactory.getLogger(QueryProcessor.class);
+     private static final MemoryMeter meter = new MemoryMeter();
+     private static final long MAX_CACHE_PREPARED_MEMORY = Runtime.getRuntime().maxMemory() / 256;
  
-     public static final int MAX_CACHE_PREPARED = 100000; // Enough to keep buggy clients from OOM'ing us
-     private static final Map<MD5Digest, CQLStatement> preparedStatements = new ConcurrentLinkedHashMap.Builder<MD5Digest, CQLStatement>()
-                                                                                .maximumWeightedCapacity(MAX_CACHE_PREPARED)
-                                                                                .build();
+     private static EntryWeigher<MD5Digest, CQLStatement> cqlMemoryUsageWeigher = new EntryWeigher<MD5Digest, CQLStatement>()
+     {
+         @Override
+         public int weightOf(MD5Digest key, CQLStatement value)
+         {
+             return Ints.checkedCast(meter.measureDeep(key) + meter.measureDeep(value));
+         }
+     };
  
-     private static final Map<Integer, CQLStatement> thriftPreparedStatements = new ConcurrentLinkedHashMap.Builder<Integer, CQLStatement>()
-                                                                                    .maximumWeightedCapacity(MAX_CACHE_PREPARED)
-                                                                                    .build();
+     private static final ConcurrentLinkedHashMap<MD5Digest, CQLStatement> preparedStatements = new ConcurrentLinkedHashMap.Builder<MD5Digest, CQLStatement>()
+                                                                                                .maximumWeightedCapacity(MAX_CACHE_PREPARED_MEMORY)
+                                                                                                .weigher(cqlMemoryUsageWeigher)
+                                                                                                .build();
+ 
+     private static EntryWeigher<Integer, CQLStatement> thriftMemoryUsageWeigher = new EntryWeigher<Integer, CQLStatement>()
+     {
+         @Override
+         public int weightOf(Integer key, CQLStatement value)
+         {
+             return Ints.checkedCast(meter.measureDeep(key) + meter.measureDeep(value));
+         }
+     };
+ 
+     private static final ConcurrentLinkedHashMap<Integer, CQLStatement> thriftPreparedStatements = new ConcurrentLinkedHashMap.Builder<Integer, CQLStatement>()
+                                                                                                    .maximumWeightedCapacity(MAX_CACHE_PREPARED_MEMORY)
+                                                                                                    .weigher(thriftMemoryUsageWeigher)
+                                                                                                    .build();
  
  
      public static CQLStatement getPrepared(MD5Digest id)
@@@ -204,11 -227,11 +237,11 @@@
          else
          {
              MD5Digest statementId = MD5Digest.compute(toHash);
+             preparedStatements.put(statementId, prepared.statement);
              logger.trace(String.format("Stored prepared statement %s with %d bind markers",
 -                         statementId,
 -                         prepared.statement.getBoundsTerms()));
 -            return new ResultMessage.Prepared(statementId, prepared.boundNames);
 +                                       statementId,
 +                                       prepared.statement.getBoundsTerms()));
-             preparedStatements.put(statementId, prepared.statement);
 +            return new ResultMessage.Prepared(statementId, prepared);
          }
      }
  


[4/6] git commit: merge from 1.2

Posted by jb...@apache.org.
merge from 1.2


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/e9b624a7
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/e9b624a7
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/e9b624a7

Branch: refs/heads/trunk
Commit: e9b624a7379b88b024cf9b06b14238699a2ff466
Parents: 27967f9 4784e67
Author: Jonathan Ellis <jb...@apache.org>
Authored: Sun Oct 6 11:29:04 2013 -0500
Committer: Jonathan Ellis <jb...@apache.org>
Committed: Sun Oct 6 11:29:04 2013 -0500

----------------------------------------------------------------------
 CHANGES.txt                                     |  3 +-
 .../apache/cassandra/cql3/QueryProcessor.java   | 49 ++++++++++++++++----
 2 files changed, 43 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/e9b624a7/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index 1590dd0,6cbfa14..94fa927
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,25 -1,5 +1,26 @@@
 -1.2.11
 +2.0.2
++ * Never return WriteTimeout for CL.ANY (CASSANDRA-6032)
 + * Fix race conditions in bulk loader (CASSANDRA-6129)
 + * Add configurable metrics reporting (CASSANDRA-4430)
 + * drop queries exceeding a configurable number of tombstones (CASSANDRA-6117)
 + * Track and persist sstable read activity (CASSANDRA-5515)
 + * Fixes for speculative retry (CASSANDRA-5932)
 + * Improve memory usage of metadata min/max column names (CASSANDRA-6077)
 + * Fix thrift validation refusing row markers on CQL3 tables (CASSANDRA-6081)
 + * Fix insertion of collections with CAS (CASSANDRA-6069)
 + * Correctly send metadata on SELECT COUNT (CASSANDRA-6080)
 + * Track clients' remote addresses in ClientState (CASSANDRA-6070)
 + * Create snapshot dir if it does not exist when migrating
 +   leveled manifest (CASSANDRA-6093)
 + * make sequential nodetool repair the default (CASSANDRA-5950)
 + * Add more hooks for compaction strategy implementations (CASSANDRA-6111)
 + * Fix potential NPE on composite 2ndary indexes (CASSANDRA-6098)
 + * Delete can potentially be skipped in batch (CASSANDRA-6115)
 + * Allow alter keyspace on system_traces (CASSANDRA-6016)
 + * Disallow empty column names in cql (CASSANDRA-6136)
 + * Use Java7 file-handling APIs and fix file moving on Windows (CASSANDRA-5383)
 +Merged from 1.2:
-  * Never return WriteTimeout for CL.ANY (CASSANDRA-6032)
+  * Limit CQL prepared statement cache by size instead of count (CASSANDRA-6107)
   * Tracing should log write failure rather than raw exceptions (CASSANDRA-6133)
   * lock access to TM.endpointToHostIdMap (CASSANDRA-6103)
   * Allow estimated memtable size to exceed slab allocator size (CASSANDRA-6078)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e9b624a7/src/java/org/apache/cassandra/cql3/QueryProcessor.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/cql3/QueryProcessor.java
index 52396e7,c4e7c44..72351d5
--- a/src/java/org/apache/cassandra/cql3/QueryProcessor.java
+++ b/src/java/org/apache/cassandra/cql3/QueryProcessor.java
@@@ -38,18 -42,39 +42,39 @@@ import org.apache.cassandra.utils.Seman
  
  public class QueryProcessor
  {
 -    public static final SemanticVersion CQL_VERSION = new SemanticVersion("3.0.5");
 +    public static final SemanticVersion CQL_VERSION = new SemanticVersion("3.1.1");
  
      private static final Logger logger = LoggerFactory.getLogger(QueryProcessor.class);
+     private static final MemoryMeter meter = new MemoryMeter();
+     private static final long MAX_CACHE_PREPARED_MEMORY = Runtime.getRuntime().maxMemory() / 256;
  
-     public static final int MAX_CACHE_PREPARED = 100000; // Enough to keep buggy clients from OOM'ing us
-     private static final Map<MD5Digest, CQLStatement> preparedStatements = new ConcurrentLinkedHashMap.Builder<MD5Digest, CQLStatement>()
-                                                                                .maximumWeightedCapacity(MAX_CACHE_PREPARED)
-                                                                                .build();
+     private static EntryWeigher<MD5Digest, CQLStatement> cqlMemoryUsageWeigher = new EntryWeigher<MD5Digest, CQLStatement>()
+     {
+         @Override
+         public int weightOf(MD5Digest key, CQLStatement value)
+         {
+             return Ints.checkedCast(meter.measureDeep(key) + meter.measureDeep(value));
+         }
+     };
  
-     private static final Map<Integer, CQLStatement> thriftPreparedStatements = new ConcurrentLinkedHashMap.Builder<Integer, CQLStatement>()
-                                                                                    .maximumWeightedCapacity(MAX_CACHE_PREPARED)
-                                                                                    .build();
+     private static final ConcurrentLinkedHashMap<MD5Digest, CQLStatement> preparedStatements = new ConcurrentLinkedHashMap.Builder<MD5Digest, CQLStatement>()
+                                                                                                .maximumWeightedCapacity(MAX_CACHE_PREPARED_MEMORY)
+                                                                                                .weigher(cqlMemoryUsageWeigher)
+                                                                                                .build();
+ 
+     private static EntryWeigher<Integer, CQLStatement> thriftMemoryUsageWeigher = new EntryWeigher<Integer, CQLStatement>()
+     {
+         @Override
+         public int weightOf(Integer key, CQLStatement value)
+         {
+             return Ints.checkedCast(meter.measureDeep(key) + meter.measureDeep(value));
+         }
+     };
+ 
+     private static final ConcurrentLinkedHashMap<Integer, CQLStatement> thriftPreparedStatements = new ConcurrentLinkedHashMap.Builder<Integer, CQLStatement>()
+                                                                                                    .maximumWeightedCapacity(MAX_CACHE_PREPARED_MEMORY)
+                                                                                                    .weigher(thriftMemoryUsageWeigher)
+                                                                                                    .build();
  
  
      public static CQLStatement getPrepared(MD5Digest id)
@@@ -204,11 -227,11 +237,11 @@@
          else
          {
              MD5Digest statementId = MD5Digest.compute(toHash);
+             preparedStatements.put(statementId, prepared.statement);
              logger.trace(String.format("Stored prepared statement %s with %d bind markers",
 -                         statementId,
 -                         prepared.statement.getBoundsTerms()));
 -            return new ResultMessage.Prepared(statementId, prepared.boundNames);
 +                                       statementId,
 +                                       prepared.statement.getBoundsTerms()));
-             preparedStatements.put(statementId, prepared.statement);
 +            return new ResultMessage.Prepared(statementId, prepared);
          }
      }
  


[3/6] git commit: Limit CQL prepared statement cache by size instead of count patch by Lyuben Todorov; reviewed by jbellis for CASSANDRA-6107

Posted by jb...@apache.org.
Limit CQL prepared statement cache by size instead of count
patch by Lyuben Todorov; reviewed by jbellis for CASSANDRA-6107


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/4784e671
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/4784e671
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/4784e671

Branch: refs/heads/trunk
Commit: 4784e6717a7217f0d82cbf73d0d3883ec52211d4
Parents: 5b7dd5e
Author: Jonathan Ellis <jb...@apache.org>
Authored: Sun Oct 6 11:27:33 2013 -0500
Committer: Jonathan Ellis <jb...@apache.org>
Committed: Sun Oct 6 11:27:33 2013 -0500

----------------------------------------------------------------------
 CHANGES.txt                                     |  1 +
 .../apache/cassandra/cql3/QueryProcessor.java   | 53 ++++++++++++++++----
 2 files changed, 44 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/4784e671/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 5af4e2e..6cbfa14 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
 1.2.11
+ * Limit CQL prepared statement cache by size instead of count (CASSANDRA-6107)
  * Tracing should log write failure rather than raw exceptions (CASSANDRA-6133)
  * lock access to TM.endpointToHostIdMap (CASSANDRA-6103)
  * Allow estimated memtable size to exceed slab allocator size (CASSANDRA-6078)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4784e671/src/java/org/apache/cassandra/cql3/QueryProcessor.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/QueryProcessor.java b/src/java/org/apache/cassandra/cql3/QueryProcessor.java
index dbc9577..c4e7c44 100644
--- a/src/java/org/apache/cassandra/cql3/QueryProcessor.java
+++ b/src/java/org/apache/cassandra/cql3/QueryProcessor.java
@@ -20,8 +20,12 @@ package org.apache.cassandra.cql3;
 import java.nio.ByteBuffer;
 import java.util.*;
 
+import com.google.common.primitives.Ints;
+
 import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap;
+import com.googlecode.concurrentlinkedhashmap.EntryWeigher;
 import org.antlr.runtime.*;
+import org.github.jamm.MemoryMeter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -41,15 +45,36 @@ public class QueryProcessor
     public static final SemanticVersion CQL_VERSION = new SemanticVersion("3.0.5");
 
     private static final Logger logger = LoggerFactory.getLogger(QueryProcessor.class);
+    private static final MemoryMeter meter = new MemoryMeter();
+    private static final long MAX_CACHE_PREPARED_MEMORY = Runtime.getRuntime().maxMemory() / 256;
+
+    private static EntryWeigher<MD5Digest, CQLStatement> cqlMemoryUsageWeigher = new EntryWeigher<MD5Digest, CQLStatement>()
+    {
+        @Override
+        public int weightOf(MD5Digest key, CQLStatement value)
+        {
+            return Ints.checkedCast(meter.measureDeep(key) + meter.measureDeep(value));
+        }
+    };
 
-    public static final int MAX_CACHE_PREPARED = 100000; // Enough to keep buggy clients from OOM'ing us
-    private static final Map<MD5Digest, CQLStatement> preparedStatements = new ConcurrentLinkedHashMap.Builder<MD5Digest, CQLStatement>()
-                                                                               .maximumWeightedCapacity(MAX_CACHE_PREPARED)
-                                                                               .build();
+    private static final ConcurrentLinkedHashMap<MD5Digest, CQLStatement> preparedStatements = new ConcurrentLinkedHashMap.Builder<MD5Digest, CQLStatement>()
+                                                                                               .maximumWeightedCapacity(MAX_CACHE_PREPARED_MEMORY)
+                                                                                               .weigher(cqlMemoryUsageWeigher)
+                                                                                               .build();
 
-    private static final Map<Integer, CQLStatement> thriftPreparedStatements = new ConcurrentLinkedHashMap.Builder<Integer, CQLStatement>()
-                                                                                   .maximumWeightedCapacity(MAX_CACHE_PREPARED)
-                                                                                   .build();
+    private static EntryWeigher<Integer, CQLStatement> thriftMemoryUsageWeigher = new EntryWeigher<Integer, CQLStatement>()
+    {
+        @Override
+        public int weightOf(Integer key, CQLStatement value)
+        {
+            return Ints.checkedCast(meter.measureDeep(key) + meter.measureDeep(value));
+        }
+    };
+
+    private static final ConcurrentLinkedHashMap<Integer, CQLStatement> thriftPreparedStatements = new ConcurrentLinkedHashMap.Builder<Integer, CQLStatement>()
+                                                                                                   .maximumWeightedCapacity(MAX_CACHE_PREPARED_MEMORY)
+                                                                                                   .weigher(thriftMemoryUsageWeigher)
+                                                                                                   .build();
 
 
     public static CQLStatement getPrepared(MD5Digest id)
@@ -178,10 +203,18 @@ public class QueryProcessor
     }
 
     private static ResultMessage.Prepared storePreparedStatement(String queryString, String keyspace, ParsedStatement.Prepared prepared, boolean forThrift)
+    throws InvalidRequestException
     {
         // Concatenate the current keyspace so we don't mix prepared statements between keyspace (#5352).
         // (if the keyspace is null, queryString has to have a fully-qualified keyspace so it's fine.
         String toHash = keyspace == null ? queryString : keyspace + queryString;
+        long statementSize = meter.measureDeep(prepared.statement);
+        // don't execute the statement if it's bigger than the allowed threshold
+        if (statementSize > MAX_CACHE_PREPARED_MEMORY)
+            throw new InvalidRequestException(String.format("Prepared statement of size %d bytes is larger than allowed maximum of %d bytes.",
+                                                            statementSize,
+                                                            MAX_CACHE_PREPARED_MEMORY));
+
         if (forThrift)
         {
             int statementId = toHash.hashCode();
@@ -194,10 +227,10 @@ public class QueryProcessor
         else
         {
             MD5Digest statementId = MD5Digest.compute(toHash);
-            logger.trace(String.format("Stored prepared statement %s with %d bind markers",
-                                       statementId,
-                                       prepared.statement.getBoundsTerms()));
             preparedStatements.put(statementId, prepared.statement);
+            logger.trace(String.format("Stored prepared statement %s with %d bind markers",
+                         statementId,
+                         prepared.statement.getBoundsTerms()));
             return new ResultMessage.Prepared(statementId, prepared.boundNames);
         }
     }