You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cassandra.apache.org by jb...@apache.org on 2013/10/06 18:29:21 UTC
[3/6] git commit: Limit CQL prepared statement cache by size instead
of count patch by Lyuben Todorov; reviewed by jbellis for CASSANDRA-6107
Limit CQL prepared statement cache by size instead of count
patch by Lyuben Todorov; reviewed by jbellis for CASSANDRA-6107
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/4784e671
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/4784e671
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/4784e671
Branch: refs/heads/trunk
Commit: 4784e6717a7217f0d82cbf73d0d3883ec52211d4
Parents: 5b7dd5e
Author: Jonathan Ellis <jb...@apache.org>
Authored: Sun Oct 6 11:27:33 2013 -0500
Committer: Jonathan Ellis <jb...@apache.org>
Committed: Sun Oct 6 11:27:33 2013 -0500
----------------------------------------------------------------------
CHANGES.txt | 1 +
.../apache/cassandra/cql3/QueryProcessor.java | 53 ++++++++++++++++----
2 files changed, 44 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4784e671/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 5af4e2e..6cbfa14 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
1.2.11
+ * Limit CQL prepared statement cache by size instead of count (CASSANDRA-6107)
* Tracing should log write failure rather than raw exceptions (CASSANDRA-6133)
* lock access to TM.endpointToHostIdMap (CASSANDRA-6103)
* Allow estimated memtable size to exceed slab allocator size (CASSANDRA-6078)
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4784e671/src/java/org/apache/cassandra/cql3/QueryProcessor.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/QueryProcessor.java b/src/java/org/apache/cassandra/cql3/QueryProcessor.java
index dbc9577..c4e7c44 100644
--- a/src/java/org/apache/cassandra/cql3/QueryProcessor.java
+++ b/src/java/org/apache/cassandra/cql3/QueryProcessor.java
@@ -20,8 +20,12 @@ package org.apache.cassandra.cql3;
import java.nio.ByteBuffer;
import java.util.*;
+import com.google.common.primitives.Ints;
+
import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap;
+import com.googlecode.concurrentlinkedhashmap.EntryWeigher;
import org.antlr.runtime.*;
+import org.github.jamm.MemoryMeter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -41,15 +45,36 @@ public class QueryProcessor
public static final SemanticVersion CQL_VERSION = new SemanticVersion("3.0.5");
private static final Logger logger = LoggerFactory.getLogger(QueryProcessor.class);
+ private static final MemoryMeter meter = new MemoryMeter();
+ private static final long MAX_CACHE_PREPARED_MEMORY = Runtime.getRuntime().maxMemory() / 256;
+
+ private static EntryWeigher<MD5Digest, CQLStatement> cqlMemoryUsageWeigher = new EntryWeigher<MD5Digest, CQLStatement>()
+ {
+ @Override
+ public int weightOf(MD5Digest key, CQLStatement value)
+ {
+ return Ints.checkedCast(meter.measureDeep(key) + meter.measureDeep(value));
+ }
+ };
- public static final int MAX_CACHE_PREPARED = 100000; // Enough to keep buggy clients from OOM'ing us
- private static final Map<MD5Digest, CQLStatement> preparedStatements = new ConcurrentLinkedHashMap.Builder<MD5Digest, CQLStatement>()
- .maximumWeightedCapacity(MAX_CACHE_PREPARED)
- .build();
+ private static final ConcurrentLinkedHashMap<MD5Digest, CQLStatement> preparedStatements = new ConcurrentLinkedHashMap.Builder<MD5Digest, CQLStatement>()
+ .maximumWeightedCapacity(MAX_CACHE_PREPARED_MEMORY)
+ .weigher(cqlMemoryUsageWeigher)
+ .build();
- private static final Map<Integer, CQLStatement> thriftPreparedStatements = new ConcurrentLinkedHashMap.Builder<Integer, CQLStatement>()
- .maximumWeightedCapacity(MAX_CACHE_PREPARED)
- .build();
+ private static EntryWeigher<Integer, CQLStatement> thriftMemoryUsageWeigher = new EntryWeigher<Integer, CQLStatement>()
+ {
+ @Override
+ public int weightOf(Integer key, CQLStatement value)
+ {
+ return Ints.checkedCast(meter.measureDeep(key) + meter.measureDeep(value));
+ }
+ };
+
+ private static final ConcurrentLinkedHashMap<Integer, CQLStatement> thriftPreparedStatements = new ConcurrentLinkedHashMap.Builder<Integer, CQLStatement>()
+ .maximumWeightedCapacity(MAX_CACHE_PREPARED_MEMORY)
+ .weigher(thriftMemoryUsageWeigher)
+ .build();
public static CQLStatement getPrepared(MD5Digest id)
@@ -178,10 +203,18 @@ public class QueryProcessor
}
private static ResultMessage.Prepared storePreparedStatement(String queryString, String keyspace, ParsedStatement.Prepared prepared, boolean forThrift)
+ throws InvalidRequestException
{
// Concatenate the current keyspace so we don't mix prepared statements between keyspaces (#5352).
// (If the keyspace is null, queryString has to have a fully-qualified keyspace so it's fine.)
String toHash = keyspace == null ? queryString : keyspace + queryString;
+ long statementSize = meter.measureDeep(prepared.statement);
+ // don't execute the statement if it's bigger than the allowed threshold
+ if (statementSize > MAX_CACHE_PREPARED_MEMORY)
+ throw new InvalidRequestException(String.format("Prepared statement of size %d bytes is larger than allowed maximum of %d bytes.",
+ statementSize,
+ MAX_CACHE_PREPARED_MEMORY));
+
if (forThrift)
{
int statementId = toHash.hashCode();
@@ -194,10 +227,10 @@ public class QueryProcessor
else
{
MD5Digest statementId = MD5Digest.compute(toHash);
- logger.trace(String.format("Stored prepared statement %s with %d bind markers",
- statementId,
- prepared.statement.getBoundsTerms()));
preparedStatements.put(statementId, prepared.statement);
+ logger.trace(String.format("Stored prepared statement %s with %d bind markers",
+ statementId,
+ prepared.statement.getBoundsTerms()));
return new ResultMessage.Prepared(statementId, prepared.boundNames);
}
}