You are viewing a plain text version of this content. The canonical link is available in the original HTML version of this message.
Posted to commits@jackrabbit.apache.org by ju...@apache.org on 2010/10/05 16:15:26 UTC
svn commit: r1004652 -
/jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/cache/ConcurrentCache.java
Author: jukka
Date: Tue Oct 5 14:15:26 2010
New Revision: 1004652
URL: http://svn.apache.org/viewvc?rev=1004652&view=rev
Log:
JCR-2699: Improve read/write concurrency
Evict old entries more aggressively
Modified:
jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/cache/ConcurrentCache.java
Modified: jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/cache/ConcurrentCache.java
URL: http://svn.apache.org/viewvc/jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/cache/ConcurrentCache.java?rev=1004652&r1=1004651&r2=1004652&view=diff
==============================================================================
--- jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/cache/ConcurrentCache.java (original)
+++ jackrabbit/trunk/jackrabbit-core/src/main/java/org/apache/jackrabbit/core/cache/ConcurrentCache.java Tue Oct 5 14:15:26 2010
@@ -147,16 +147,21 @@ public class ConcurrentCache<K, V> exten
* @return the previous value, or <code>null</code>
*/
public V put(K key, V value, long size) {
+ E<V> previous;
+
Map<K, E<V>> segment = getSegment(key);
synchronized (segment) {
recordSizeChange(size);
- E<V> previous = segment.put(key, new E<V>(value, size));
- if (previous != null) {
- recordSizeChange(-previous.size);
- return previous.value;
- } else {
- return null;
- }
+ previous = segment.put(key, new E<V>(value, size));
+ }
+
+ if (previous != null) {
+ recordSizeChange(-previous.size);
+ shrinkIfNeeded();
+ return previous.value;
+ } else {
+ shrinkIfNeeded();
+ return null;
}
}
@@ -210,7 +215,13 @@ public class ConcurrentCache<K, V> exten
@Override
public void setMaxMemorySize(long size) {
super.setMaxMemorySize(size);
+ shrinkIfNeeded();
+ }
+ /**
+ * Removes old entries from the cache until the cache is small enough.
+ */
+ private void shrinkIfNeeded() {
// Semi-random start index to prevent bias against the first segments
int start = (int) getAccessCount() % segments.length;
for (int i = start; isTooBig(); i = (i + 1) % segments.length) {
@@ -218,10 +229,9 @@ public class ConcurrentCache<K, V> exten
Iterator<Map.Entry<K, E<V>>> iterator =
segments[i].entrySet().iterator();
if (iterator.hasNext()) {
- Map.Entry<K, E<V>> entry = iterator.next();
// Removing and re-adding the first entry will
- // automatically the last entry if the cache is
- // too big
+ // evict the last entry if the cache is too big
+ Map.Entry<K, E<V>> entry = iterator.next();
segments[i].remove(entry.getKey());
segments[i].put(entry.getKey(), entry.getValue());
}