Posted to commits@groovy.apache.org by su...@apache.org on 2017/12/01 16:29:02 UTC
[1/3] groovy git commit: Keep concurrentlinkedhashmap as it is
Repository: groovy
Updated Branches:
refs/heads/master 35aceee82 -> cebc337bd
http://git-wip-us.apache.org/repos/asf/groovy/blob/cebc337b/src/main/org/apache/groovy/util/concurrentlinkedhashmap/EntryWeigher.java
----------------------------------------------------------------------
diff --git a/src/main/org/apache/groovy/util/concurrentlinkedhashmap/EntryWeigher.java b/src/main/org/apache/groovy/util/concurrentlinkedhashmap/EntryWeigher.java
index aea2e42..1075c10 100644
--- a/src/main/org/apache/groovy/util/concurrentlinkedhashmap/EntryWeigher.java
+++ b/src/main/org/apache/groovy/util/concurrentlinkedhashmap/EntryWeigher.java
@@ -15,7 +15,7 @@
*/
package org.apache.groovy.util.concurrentlinkedhashmap;
-//import javax.annotation.concurrent.ThreadSafe;
+import javax.annotation.concurrent.ThreadSafe;
/**
* A class that can determine the weight of an entry. The total weight threshold
@@ -25,7 +25,7 @@ package org.apache.groovy.util.concurrentlinkedhashmap;
* @see <a href="http://code.google.com/p/concurrentlinkedhashmap/">
* http://code.google.com/p/concurrentlinkedhashmap/</a>
*/
-//@ThreadSafe
+@ThreadSafe
public interface EntryWeigher<K, V> {
/**
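For context, an EntryWeigher is a one-method strategy object; the weightOf(key, value) signature matches the weigher.weightOf(key, value) call site visible in the ConcurrentLinkedHashMap diff further down this mail. A minimal sketch of an implementation (the collection-valued cache is illustrative only, not part of this commit):

    import org.apache.groovy.util.concurrentlinkedhashmap.EntryWeigher;
    import java.util.Collection;

    // Weighs an entry by the size of its collection value, so the map's
    // capacity bounds the total number of contained elements rather than
    // the number of entries. Weights must be positive for live entries.
    final class CollectionEntryWeigher<K> implements EntryWeigher<K, Collection<?>> {
        @Override
        public int weightOf(K key, Collection<?> value) {
            return Math.max(1, value.size());
        }
    }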
http://git-wip-us.apache.org/repos/asf/groovy/blob/cebc337b/src/main/org/apache/groovy/util/concurrentlinkedhashmap/EvictionListener.java
----------------------------------------------------------------------
diff --git a/src/main/org/apache/groovy/util/concurrentlinkedhashmap/EvictionListener.java b/src/main/org/apache/groovy/util/concurrentlinkedhashmap/EvictionListener.java
index 36b48f5..4b608a0 100644
--- a/src/main/org/apache/groovy/util/concurrentlinkedhashmap/EvictionListener.java
+++ b/src/main/org/apache/groovy/util/concurrentlinkedhashmap/EvictionListener.java
@@ -15,7 +15,7 @@
*/
package org.apache.groovy.util.concurrentlinkedhashmap;
-//import javax.annotation.concurrent.ThreadSafe;
+import javax.annotation.concurrent.ThreadSafe;
/**
* A listener registered for notification when an entry is evicted. An instance
@@ -35,7 +35,7 @@ package org.apache.groovy.util.concurrentlinkedhashmap;
* @see <a href="http://code.google.com/p/concurrentlinkedhashmap/">
* http://code.google.com/p/concurrentlinkedhashmap/</a>
*/
-//@ThreadSafe
+@ThreadSafe
public interface EvictionListener<K, V> {
/**
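Likewise, a listener is a single callback; the onEviction(key, value) signature matches the listener.onEviction(...) call in notifyListener() later in this mail. The map drains pending notifications on whichever thread triggers them, so the callback should stay cheap. A sketch:

    import org.apache.groovy.util.concurrentlinkedhashmap.EvictionListener;

    // Receives entries after they have been evicted from the map.
    // Invoked while draining pending notifications, so it should not
    // block or call back into the map.
    final class LoggingEvictionListener<K, V> implements EvictionListener<K, V> {
        @Override
        public void onEviction(K key, V value) {
            System.out.println("evicted: " + key + " -> " + value);
        }
    }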
http://git-wip-us.apache.org/repos/asf/groovy/blob/cebc337b/src/main/org/apache/groovy/util/concurrentlinkedhashmap/LinkedDeque.java
----------------------------------------------------------------------
diff --git a/src/main/org/apache/groovy/util/concurrentlinkedhashmap/LinkedDeque.java b/src/main/org/apache/groovy/util/concurrentlinkedhashmap/LinkedDeque.java
index 8deda5a..447b769 100644
--- a/src/main/org/apache/groovy/util/concurrentlinkedhashmap/LinkedDeque.java
+++ b/src/main/org/apache/groovy/util/concurrentlinkedhashmap/LinkedDeque.java
@@ -15,14 +15,13 @@
*/
package org.apache.groovy.util.concurrentlinkedhashmap;
+import javax.annotation.concurrent.NotThreadSafe;
import java.util.AbstractCollection;
import java.util.Collection;
import java.util.Deque;
import java.util.Iterator;
import java.util.NoSuchElementException;
-//import javax.annotation.concurrent.NotThreadSafe;
-
/**
* Linked list implementation of the {@link Deque} interface where the link
* pointers are tightly integrated with the element. Linked deques have no
@@ -45,7 +44,7 @@ import java.util.NoSuchElementException;
* @see <a href="http://code.google.com/p/concurrentlinkedhashmap/">
* http://code.google.com/p/concurrentlinkedhashmap/</a>
*/
-//@NotThreadSafe
+@NotThreadSafe
final class LinkedDeque<E extends Linked<E>> extends AbstractCollection<E> implements Deque<E> {
// This class provides a doubly-linked list that is optimized for the virtual
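The "tightly integrated" link pointers mean elements are intrusive: each element carries its own prev/next fields via the Linked interface, which is what makes removal by element reference constant-time. A minimal element sketch; the accessor names mirror the Node implementation later in this mail (Linked and LinkedDeque are package-private, so such a type would live in the same package):

    // An intrusive deque element; LinkedDeque stores links on the element
    // itself instead of allocating internal wrapper nodes.
    final class Item implements Linked<Item> {
        Item prev;
        Item next;

        @Override public Item getPrevious()          { return prev; }
        @Override public void setPrevious(Item prev) { this.prev = prev; }
        @Override public Item getNext()              { return next; }
        @Override public void setNext(Item next)     { this.next = next; }
    }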
http://git-wip-us.apache.org/repos/asf/groovy/blob/cebc337b/src/main/org/apache/groovy/util/concurrentlinkedhashmap/Weigher.java
----------------------------------------------------------------------
diff --git a/src/main/org/apache/groovy/util/concurrentlinkedhashmap/Weigher.java b/src/main/org/apache/groovy/util/concurrentlinkedhashmap/Weigher.java
index e554d68..c7eac09 100644
--- a/src/main/org/apache/groovy/util/concurrentlinkedhashmap/Weigher.java
+++ b/src/main/org/apache/groovy/util/concurrentlinkedhashmap/Weigher.java
@@ -15,7 +15,7 @@
*/
package org.apache.groovy.util.concurrentlinkedhashmap;
-//import javax.annotation.concurrent.ThreadSafe;
+import javax.annotation.concurrent.ThreadSafe;
/**
* A class that can determine the weight of a value. The total weight threshold
@@ -25,7 +25,7 @@ package org.apache.groovy.util.concurrentlinkedhashmap;
* @see <a href="http://code.google.com/p/concurrentlinkedhashmap/">
* http://code.google.com/p/concurrentlinkedhashmap/</a>
*/
-//@ThreadSafe
+@ThreadSafe
public interface Weigher<V> {
/**
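Weigher differs from EntryWeigher only in that it weighs the value alone; the single-argument weightOf(value) signature assumed below is taken from the upstream library, as it is not shown in this excerpt. A sketch:

    import org.apache.groovy.util.concurrentlinkedhashmap.Weigher;

    // Approximates a byte budget: with this weigher the map's capacity
    // bounds the summed array lengths instead of the entry count.
    final class ByteArrayWeigher implements Weigher<byte[]> {
        @Override
        public int weightOf(byte[] value) {
            return Math.max(1, value.length);
        }
    }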
[3/3] groovy git commit: Keep concurrentlinkedhashmap as it is
Posted by su...@apache.org.
Keep concurrentlinkedhashmap as it is
Project: http://git-wip-us.apache.org/repos/asf/groovy/repo
Commit: http://git-wip-us.apache.org/repos/asf/groovy/commit/cebc337b
Tree: http://git-wip-us.apache.org/repos/asf/groovy/tree/cebc337b
Diff: http://git-wip-us.apache.org/repos/asf/groovy/diff/cebc337b
Branch: refs/heads/master
Commit: cebc337bdd8e33ce832898c99348852f90e62d0f
Parents: 35aceee
Author: sunlan <su...@apache.org>
Authored: Sat Dec 2 00:28:49 2017 +0800
Committer: sunlan <su...@apache.org>
Committed: Sat Dec 2 00:28:49 2017 +0800
----------------------------------------------------------------------
build.gradle | 4 +
.../ConcurrentLinkedHashMap.java | 2628 +++++++++---------
.../concurrentlinkedhashmap/EntryWeigher.java | 4 +-
.../EvictionListener.java | 4 +-
.../concurrentlinkedhashmap/LinkedDeque.java | 5 +-
.../util/concurrentlinkedhashmap/Weigher.java | 4 +-
6 files changed, 1270 insertions(+), 1379 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/groovy/blob/cebc337b/build.gradle
----------------------------------------------------------------------
diff --git a/build.gradle b/build.gradle
index 88e15bd..e4ed3c9 100644
--- a/build.gradle
+++ b/build.gradle
@@ -187,6 +187,7 @@ ext {
xstreamVersion = '1.4.10'
spockVersion = '1.1-groovy-2.4-SNAPSHOT' // supports 3.0
antlr4Version = '4.7'
+ jsr305Version = '3.0.2'
isReleaseVersion = !groovyVersion.toLowerCase().endsWith("snapshot")
}
@@ -213,9 +214,12 @@ dependencies {
}
compile files("${buildDir}/generated-classes")
+ compileOnly "com.google.code.findbugs:jsr305:$jsr305Version"
+
runtime("org.codehaus.gpars:gpars:$gparsVersion") {
exclude(group: 'org.codehaus.groovy', module: 'groovy-all')
}
+
testCompile "jmock:jmock:$jmockVersion"
testCompile "jmock:jmock-cglib:$jmockVersion"
testCompile "xmlunit:xmlunit:$xmlunitVersion"
[2/3] groovy git commit: Keep concurrentlinkedhashmap as it is
Posted by su...@apache.org.
http://git-wip-us.apache.org/repos/asf/groovy/blob/cebc337b/src/main/org/apache/groovy/util/concurrentlinkedhashmap/ConcurrentLinkedHashMap.java
----------------------------------------------------------------------
diff --git a/src/main/org/apache/groovy/util/concurrentlinkedhashmap/ConcurrentLinkedHashMap.java b/src/main/org/apache/groovy/util/concurrentlinkedhashmap/ConcurrentLinkedHashMap.java
index a981ccd..938ef63 100644
--- a/src/main/org/apache/groovy/util/concurrentlinkedhashmap/ConcurrentLinkedHashMap.java
+++ b/src/main/org/apache/groovy/util/concurrentlinkedhashmap/ConcurrentLinkedHashMap.java
@@ -15,6 +15,9 @@
*/
package org.apache.groovy.util.concurrentlinkedhashmap;
+import javax.annotation.concurrent.GuardedBy;
+import javax.annotation.concurrent.Immutable;
+import javax.annotation.concurrent.ThreadSafe;
import java.io.InvalidObjectException;
import java.io.ObjectInputStream;
import java.io.Serializable;
@@ -24,7 +27,6 @@ import java.util.AbstractQueue;
import java.util.AbstractSet;
import java.util.Collection;
import java.util.HashMap;
-import java.util.Hashtable;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
@@ -46,10 +48,6 @@ import static org.apache.groovy.util.concurrentlinkedhashmap.ConcurrentLinkedHas
import static org.apache.groovy.util.concurrentlinkedhashmap.ConcurrentLinkedHashMap.DrainStatus.PROCESSING;
import static org.apache.groovy.util.concurrentlinkedhashmap.ConcurrentLinkedHashMap.DrainStatus.REQUIRED;
-//import javax.annotation.concurrent.GuardedBy;
-//import javax.annotation.concurrent.Immutable;
-//import javax.annotation.concurrent.ThreadSafe;
-
/**
* A hash table supporting full concurrency of retrievals, adjustable expected
* concurrency for updates, and a maximum capacity to bound the map by. This
@@ -89,21 +87,21 @@ import static org.apache.groovy.util.concurrentlinkedhashmap.ConcurrentLinkedHas
* <em>optional</em> methods of the {@link Map} and {@link Iterator}
* interfaces.
* <p>
- * Like {@link Hashtable} but unlike {@link HashMap}, this class
+ * Like {@link java.util.Hashtable} but unlike {@link HashMap}, this class
* does <em>not</em> allow <tt>null</tt> to be used as a key or value. Unlike
* {@link LinkedHashMap}, this class does <em>not</em> provide
* predictable iteration order. A snapshot of the keys and entries may be
* obtained in ascending and descending order of retention.
*
+ * @author ben.manes@gmail.com (Ben Manes)
* @param <K> the type of keys maintained by this map
* @param <V> the type of mapped values
- * @author ben.manes@gmail.com (Ben Manes)
* @see <a href="http://code.google.com/p/concurrentlinkedhashmap/">
- * http://code.google.com/p/concurrentlinkedhashmap/</a>
+ * http://code.google.com/p/concurrentlinkedhashmap/</a>
*/
-//@ThreadSafe
+@ThreadSafe
public final class ConcurrentLinkedHashMap<K, V> extends AbstractMap<K, V>
- implements ConcurrentMap<K, V>, Serializable {
+ implements ConcurrentMap<K, V>, Serializable {
/*
* This class performs a best-effort bounding of a ConcurrentHashMap using a
@@ -144,1569 +142,1459 @@ public final class ConcurrentLinkedHashMap<K, V> extends AbstractMap<K, V>
* complexity.
*/
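For orientation, construction goes through the nested Builder referenced by the private constructor below. The fluent method names (maximumWeightedCapacity, build) are assumed from the upstream library's API, since the Builder source is not part of this excerpt:

    ConcurrentLinkedHashMap<String, byte[]> cache =
        new ConcurrentLinkedHashMap.Builder<String, byte[]>()
            .maximumWeightedCapacity(1024)  // bound on total weight, see capacity()
            .build();                       // entries weigh 1 each by default

    cache.put("a", new byte[16]);
    byte[] bytes = cache.get("a");          // also records the read for the policy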
- /**
- * The number of CPUs
- */
- static final int NCPU = Runtime.getRuntime().availableProcessors();
+ /** The number of CPUs */
+ static final int NCPU = Runtime.getRuntime().availableProcessors();
- /**
- * The maximum weighted capacity of the map.
- */
- static final long MAXIMUM_CAPACITY = Long.MAX_VALUE - Integer.MAX_VALUE;
+ /** The maximum weighted capacity of the map. */
+ static final long MAXIMUM_CAPACITY = Long.MAX_VALUE - Integer.MAX_VALUE;
- /**
- * The number of read buffers to use.
- */
- static final int NUMBER_OF_READ_BUFFERS = ceilingNextPowerOfTwo(NCPU);
+ /** The number of read buffers to use. */
+ static final int NUMBER_OF_READ_BUFFERS = ceilingNextPowerOfTwo(NCPU);
- /**
- * Mask value for indexing into the read buffers.
- */
- static final int READ_BUFFERS_MASK = NUMBER_OF_READ_BUFFERS - 1;
+ /** Mask value for indexing into the read buffers. */
+ static final int READ_BUFFERS_MASK = NUMBER_OF_READ_BUFFERS - 1;
- /**
- * The number of pending read operations before attempting to drain.
- */
- static final int READ_BUFFER_THRESHOLD = 32;
+ /** The number of pending read operations before attempting to drain. */
+ static final int READ_BUFFER_THRESHOLD = 32;
- /**
- * The maximum number of read operations to perform per amortized drain.
- */
- static final int READ_BUFFER_DRAIN_THRESHOLD = 2 * READ_BUFFER_THRESHOLD;
+ /** The maximum number of read operations to perform per amortized drain. */
+ static final int READ_BUFFER_DRAIN_THRESHOLD = 2 * READ_BUFFER_THRESHOLD;
- /**
- * The maximum number of pending reads per buffer.
- */
- static final int READ_BUFFER_SIZE = 2 * READ_BUFFER_DRAIN_THRESHOLD;
+ /** The maximum number of pending reads per buffer. */
+ static final int READ_BUFFER_SIZE = 2 * READ_BUFFER_DRAIN_THRESHOLD;
- /**
- * Mask value for indexing into the read buffer.
- */
- static final int READ_BUFFER_INDEX_MASK = READ_BUFFER_SIZE - 1;
+ /** Mask value for indexing into the read buffer. */
+ static final int READ_BUFFER_INDEX_MASK = READ_BUFFER_SIZE - 1;
- /**
- * The maximum number of write operations to perform per amortized drain.
- */
- static final int WRITE_BUFFER_DRAIN_THRESHOLD = 16;
+ /** The maximum number of write operations to perform per amortized drain. */
+ static final int WRITE_BUFFER_DRAIN_THRESHOLD = 16;
- /**
- * A queue that discards all entries.
- */
- static final Queue<?> DISCARDING_QUEUE = new DiscardingQueue();
+ /** A queue that discards all entries. */
+ static final Queue<?> DISCARDING_QUEUE = new DiscardingQueue();
- static int ceilingNextPowerOfTwo(int x) {
- // From Hacker's Delight, Chapter 3, Harry S. Warren Jr.
- return 1 << (Integer.SIZE - Integer.numberOfLeadingZeros(x - 1));
- }
+ static int ceilingNextPowerOfTwo(int x) {
+ // From Hacker's Delight, Chapter 3, Harry S. Warren Jr.
+ return 1 << (Integer.SIZE - Integer.numberOfLeadingZeros(x - 1));
+ }
- // The backing data store holding the key-value associations
- final ConcurrentMap<K, Node<K, V>> data;
- final int concurrencyLevel;
+ // The backing data store holding the key-value associations
+ final ConcurrentMap<K, Node<K, V>> data;
+ final int concurrencyLevel;
- // These fields provide support to bound the map by a maximum capacity
- // @GuardedBy("evictionLock")
- final long[] readBufferReadCount;
- // @GuardedBy("evictionLock")
- final LinkedDeque<Node<K, V>> evictionDeque;
+ // These fields provide support to bound the map by a maximum capacity
+ @GuardedBy("evictionLock")
+ final long[] readBufferReadCount;
+ @GuardedBy("evictionLock")
+ final LinkedDeque<Node<K, V>> evictionDeque;
- // @GuardedBy("evictionLock") // must write under lock
- final AtomicLong weightedSize;
- // @GuardedBy("evictionLock") // must write under lock
- final AtomicLong capacity;
+ @GuardedBy("evictionLock") // must write under lock
+ final AtomicLong weightedSize;
+ @GuardedBy("evictionLock") // must write under lock
+ final AtomicLong capacity;
- final Lock evictionLock;
- final Queue<Runnable> writeBuffer;
- final AtomicLong[] readBufferWriteCount;
- final AtomicLong[] readBufferDrainAtWriteCount;
- final AtomicReference<Node<K, V>>[][] readBuffers;
+ final Lock evictionLock;
+ final Queue<Runnable> writeBuffer;
+ final AtomicLong[] readBufferWriteCount;
+ final AtomicLong[] readBufferDrainAtWriteCount;
+ final AtomicReference<Node<K, V>>[][] readBuffers;
- final AtomicReference<DrainStatus> drainStatus;
- final EntryWeigher<? super K, ? super V> weigher;
+ final AtomicReference<DrainStatus> drainStatus;
+ final EntryWeigher<? super K, ? super V> weigher;
- // These fields provide support for notifying a listener.
- final Queue<Node<K, V>> pendingNotifications;
- final EvictionListener<K, V> listener;
+ // These fields provide support for notifying a listener.
+ final Queue<Node<K, V>> pendingNotifications;
+ final EvictionListener<K, V> listener;
- transient Set<K> keySet;
- transient Collection<V> values;
- transient Set<Entry<K, V>> entrySet;
+ transient Set<K> keySet;
+ transient Collection<V> values;
+ transient Set<Entry<K, V>> entrySet;
- /**
- * Creates an instance based on the builder's configuration.
- */
- @SuppressWarnings({"unchecked", "cast"})
- private ConcurrentLinkedHashMap(Builder<K, V> builder) {
- // The data store and its maximum capacity
- concurrencyLevel = builder.concurrencyLevel;
- capacity = new AtomicLong(Math.min(builder.capacity, MAXIMUM_CAPACITY));
- data = new ConcurrentHashMap<K, Node<K, V>>(builder.initialCapacity, 0.75f, concurrencyLevel);
-
- // The eviction support
- weigher = builder.weigher;
- evictionLock = new ReentrantLock();
- weightedSize = new AtomicLong();
- evictionDeque = new LinkedDeque<Node<K, V>>();
- writeBuffer = new ConcurrentLinkedQueue<Runnable>();
- drainStatus = new AtomicReference<DrainStatus>(IDLE);
-
- readBufferReadCount = new long[NUMBER_OF_READ_BUFFERS];
- readBufferWriteCount = new AtomicLong[NUMBER_OF_READ_BUFFERS];
- readBufferDrainAtWriteCount = new AtomicLong[NUMBER_OF_READ_BUFFERS];
- readBuffers = new AtomicReference[NUMBER_OF_READ_BUFFERS][READ_BUFFER_SIZE];
- for (int i = 0; i < NUMBER_OF_READ_BUFFERS; i++) {
- readBufferWriteCount[i] = new AtomicLong();
- readBufferDrainAtWriteCount[i] = new AtomicLong();
- readBuffers[i] = new AtomicReference[READ_BUFFER_SIZE];
- for (int j = 0; j < READ_BUFFER_SIZE; j++) {
- readBuffers[i][j] = new AtomicReference<Node<K, V>>();
- }
- }
+ /**
+ * Creates an instance based on the builder's configuration.
+ */
+ @SuppressWarnings({"unchecked", "cast"})
+ private ConcurrentLinkedHashMap(Builder<K, V> builder) {
+ // The data store and its maximum capacity
+ concurrencyLevel = builder.concurrencyLevel;
+ capacity = new AtomicLong(Math.min(builder.capacity, MAXIMUM_CAPACITY));
+ data = new ConcurrentHashMap<K, Node<K, V>>(builder.initialCapacity, 0.75f, concurrencyLevel);
+
+ // The eviction support
+ weigher = builder.weigher;
+ evictionLock = new ReentrantLock();
+ weightedSize = new AtomicLong();
+ evictionDeque = new LinkedDeque<Node<K, V>>();
+ writeBuffer = new ConcurrentLinkedQueue<Runnable>();
+ drainStatus = new AtomicReference<DrainStatus>(IDLE);
+
+ readBufferReadCount = new long[NUMBER_OF_READ_BUFFERS];
+ readBufferWriteCount = new AtomicLong[NUMBER_OF_READ_BUFFERS];
+ readBufferDrainAtWriteCount = new AtomicLong[NUMBER_OF_READ_BUFFERS];
+ readBuffers = new AtomicReference[NUMBER_OF_READ_BUFFERS][READ_BUFFER_SIZE];
+ for (int i = 0; i < NUMBER_OF_READ_BUFFERS; i++) {
+ readBufferWriteCount[i] = new AtomicLong();
+ readBufferDrainAtWriteCount[i] = new AtomicLong();
+ readBuffers[i] = new AtomicReference[READ_BUFFER_SIZE];
+ for (int j = 0; j < READ_BUFFER_SIZE; j++) {
+ readBuffers[i][j] = new AtomicReference<Node<K, V>>();
+ }
+ }
+
+ // The notification queue and listener
+ listener = builder.listener;
+ pendingNotifications = (listener == DiscardingListener.INSTANCE)
+ ? (Queue<Node<K, V>>) DISCARDING_QUEUE
+ : new ConcurrentLinkedQueue<Node<K, V>>();
+ }
+
+ /** Ensures that the object is not null. */
+ static void checkNotNull(Object o) {
+ if (o == null) {
+ throw new NullPointerException();
+ }
+ }
+
+ /** Ensures that the argument expression is true. */
+ static void checkArgument(boolean expression) {
+ if (!expression) {
+ throw new IllegalArgumentException();
+ }
+ }
+
+ /** Ensures that the state expression is true. */
+ static void checkState(boolean expression) {
+ if (!expression) {
+ throw new IllegalStateException();
+ }
+ }
+
+ /* ---------------- Eviction Support -------------- */
- // The notification queue and listener
- listener = builder.listener;
- pendingNotifications = (listener == DiscardingListener.INSTANCE)
- ? (Queue<Node<K, V>>) DISCARDING_QUEUE
- : new ConcurrentLinkedQueue<Node<K, V>>();
+ /**
+ * Retrieves the maximum weighted capacity of the map.
+ *
+ * @return the maximum weighted capacity
+ */
+ public long capacity() {
+ return capacity.get();
+ }
+
+ /**
+ * Sets the maximum weighted capacity of the map and eagerly evicts entries
+ * until it shrinks to the appropriate size.
+ *
+ * @param capacity the maximum weighted capacity of the map
+ * @throws IllegalArgumentException if the capacity is negative
+ */
+ public void setCapacity(long capacity) {
+ checkArgument(capacity >= 0);
+ evictionLock.lock();
+ try {
+ this.capacity.lazySet(Math.min(capacity, MAXIMUM_CAPACITY));
+ drainBuffers();
+ evict();
+ } finally {
+ evictionLock.unlock();
+ }
+ notifyListener();
+ }
+
+ /** Determines whether the map has exceeded its capacity. */
+ @GuardedBy("evictionLock")
+ boolean hasOverflowed() {
+ return weightedSize.get() > capacity.get();
+ }
+
+ /**
+ * Evicts entries from the map while it exceeds the capacity and appends
+ * evicted entries to the notification queue for processing.
+ */
+ @GuardedBy("evictionLock")
+ void evict() {
+ // Attempts to evict entries from the map if it exceeds the maximum
+ // capacity. If the eviction fails due to a concurrent removal of the
+ // victim, that removal may cancel out the addition that triggered this
+ // eviction. The victim is eagerly unlinked before the removal task so
+ // that if an eviction is still required then a new victim will be chosen
+ // for removal.
+ while (hasOverflowed()) {
+ final Node<K, V> node = evictionDeque.poll();
+
+ // If weighted values are used, then the pending operations will adjust
+ // the size to reflect the correct weight
+ if (node == null) {
+ return;
+ }
+
+ // Notify the listener only if the entry was evicted
+ if (data.remove(node.key, node)) {
+ pendingNotifications.add(node);
+ }
+
+ makeDead(node);
+ }
+ }
+
+ /**
+ * Performs the post-processing work required after a read.
+ *
+ * @param node the entry in the page replacement policy
+ */
+ void afterRead(Node<K, V> node) {
+ final int bufferIndex = readBufferIndex();
+ final long writeCount = recordRead(bufferIndex, node);
+ drainOnReadIfNeeded(bufferIndex, writeCount);
+ notifyListener();
+ }
+
+ /** Returns the index to the read buffer to record into. */
+ static int readBufferIndex() {
+ // A buffer is chosen by the thread's id so that tasks are distributed in a
+ // pseudo-even manner. This helps avoid hot entries causing contention
+ // due to other threads trying to append to the same buffer.
+ return ((int) Thread.currentThread().getId()) & READ_BUFFERS_MASK;
+ }
+
+ /**
+ * Records a read in the buffer and returns its write count.
+ *
+ * @param bufferIndex the index to the chosen read buffer
+ * @param node the entry in the page replacement policy
+ * @return the number of writes on the chosen read buffer
+ */
+ long recordRead(int bufferIndex, Node<K, V> node) {
+ // The location in the buffer is chosen in a racy fashion as the increment
+ // is not atomic with the insertion. This means that concurrent reads can
+ // overlap and overwrite one another, resulting in a lossy buffer.
+ final AtomicLong counter = readBufferWriteCount[bufferIndex];
+ final long writeCount = counter.get();
+ counter.lazySet(writeCount + 1);
+
+ final int index = (int) (writeCount & READ_BUFFER_INDEX_MASK);
+ readBuffers[bufferIndex][index].lazySet(node);
+
+ return writeCount;
+ }
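The two methods above implement a striped, lossy ring buffer. A condensed sketch of the index math (the masks rely on the buffer counts, defined at the top of this class, being powers of two):

    static int stripe(int numberOfBuffers) {
        // the thread id picks the buffer, spreading concurrent readers out
        return ((int) Thread.currentThread().getId()) & (numberOfBuffers - 1);
    }

    static int slot(long writeCount, int bufferSize) {
        // the masked write count picks the ring-buffer slot; racy overwrites
        // are tolerated because a lost read is only a lost LRU hint
        return (int) (writeCount & (bufferSize - 1));
    }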
+
+ /**
+ * Attempts to drain the buffers if it is determined to be needed when
+ * post-processing a read.
+ *
+ * @param bufferIndex the index to the chosen read buffer
+ * @param writeCount the number of writes on the chosen read buffer
+ */
+ void drainOnReadIfNeeded(int bufferIndex, long writeCount) {
+ final long pending = (writeCount - readBufferDrainAtWriteCount[bufferIndex].get());
+ final boolean delayable = (pending < READ_BUFFER_THRESHOLD);
+ final DrainStatus status = drainStatus.get();
+ if (status.shouldDrainBuffers(delayable)) {
+ tryToDrainBuffers();
}
+ }
- /**
- * Ensures that the object is not null.
- */
- static void checkNotNull(Object o) {
- if (o == null) {
- throw new NullPointerException();
- }
+ /**
+ * Performs the post-processing work required after a write.
+ *
+ * @param task the pending operation to be applied
+ */
+ void afterWrite(Runnable task) {
+ writeBuffer.add(task);
+ drainStatus.lazySet(REQUIRED);
+ tryToDrainBuffers();
+ notifyListener();
+ }
+
+ /**
+ * Attempts to acquire the eviction lock and apply the pending operations, up
+ * to the amortized threshold, to the page replacement policy.
+ */
+ void tryToDrainBuffers() {
+ if (evictionLock.tryLock()) {
+ try {
+ drainStatus.lazySet(PROCESSING);
+ drainBuffers();
+ } finally {
+ drainStatus.compareAndSet(PROCESSING, IDLE);
+ evictionLock.unlock();
+ }
+ }
+ }
+
+ /** Drains the read and write buffers up to an amortized threshold. */
+ @GuardedBy("evictionLock")
+ void drainBuffers() {
+ drainReadBuffers();
+ drainWriteBuffer();
+ }
+
+ /** Drains the read buffers, each up to an amortized threshold. */
+ @GuardedBy("evictionLock")
+ void drainReadBuffers() {
+ final int start = (int) Thread.currentThread().getId();
+ final int end = start + NUMBER_OF_READ_BUFFERS;
+ for (int i = start; i < end; i++) {
+ drainReadBuffer(i & READ_BUFFERS_MASK);
+ }
+ }
+
+ /** Drains the read buffer up to an amortized threshold. */
+ @GuardedBy("evictionLock")
+ void drainReadBuffer(int bufferIndex) {
+ final long writeCount = readBufferWriteCount[bufferIndex].get();
+ for (int i = 0; i < READ_BUFFER_DRAIN_THRESHOLD; i++) {
+ final int index = (int) (readBufferReadCount[bufferIndex] & READ_BUFFER_INDEX_MASK);
+ final AtomicReference<Node<K, V>> slot = readBuffers[bufferIndex][index];
+ final Node<K, V> node = slot.get();
+ if (node == null) {
+ break;
+ }
+
+ slot.lazySet(null);
+ applyRead(node);
+ readBufferReadCount[bufferIndex]++;
+ }
+ readBufferDrainAtWriteCount[bufferIndex].lazySet(writeCount);
+ }
+
+ /** Updates the node's location in the page replacement policy. */
+ @GuardedBy("evictionLock")
+ void applyRead(Node<K, V> node) {
+ // An entry may be scheduled for reordering despite having been removed.
+ // This can occur when the entry was concurrently read while a writer was
+ // removing it. If the entry is no longer linked then it does not need to
+ // be processed.
+ if (evictionDeque.contains(node)) {
+ evictionDeque.moveToBack(node);
+ }
+ }
+
+ /** Drains the write buffer up to an amortized threshold. */
+ @GuardedBy("evictionLock")
+ void drainWriteBuffer() {
+ for (int i = 0; i < WRITE_BUFFER_DRAIN_THRESHOLD; i++) {
+ final Runnable task = writeBuffer.poll();
+ if (task == null) {
+ break;
+ }
+ task.run();
+ }
+ }
+
+ /**
+ * Attempts to transition the node from the <tt>alive</tt> state to the
+ * <tt>retired</tt> state.
+ *
+ * @param node the entry in the page replacement policy
+ * @param expect the expected weighted value
+ * @return whether the transition was successful
+ */
+ boolean tryToRetire(Node<K, V> node, WeightedValue<V> expect) {
+ if (expect.isAlive()) {
+ final WeightedValue<V> retired = new WeightedValue<V>(expect.value, -expect.weight);
+ return node.compareAndSet(expect, retired);
}
+ return false;
+ }
- /**
- * Ensures that the argument expression is true.
- */
- static void checkArgument(boolean expression) {
- if (!expression) {
- throw new IllegalArgumentException();
- }
+ /**
+ * Atomically transitions the node from the <tt>alive</tt> state to the
+ * <tt>retired</tt> state, if a valid transition.
+ *
+ * @param node the entry in the page replacement policy
+ */
+ void makeRetired(Node<K, V> node) {
+ for (;;) {
+ final WeightedValue<V> current = node.get();
+ if (!current.isAlive()) {
+ return;
+ }
+ final WeightedValue<V> retired = new WeightedValue<V>(current.value, -current.weight);
+ if (node.compareAndSet(current, retired)) {
+ return;
+ }
+ }
+ }
+
+ /**
+ * Atomically transitions the node to the <tt>dead</tt> state and decrements
+ * the <tt>weightedSize</tt>.
+ *
+ * @param node the entry in the page replacement policy
+ */
+ @GuardedBy("evictionLock")
+ void makeDead(Node<K, V> node) {
+ for (;;) {
+ WeightedValue<V> current = node.get();
+ WeightedValue<V> dead = new WeightedValue<V>(current.value, 0);
+ if (node.compareAndSet(current, dead)) {
+ weightedSize.lazySet(weightedSize.get() - Math.abs(current.weight));
+ return;
+ }
}
+ }
- /**
- * Ensures that the state expression is true.
- */
- static void checkState(boolean expression) {
- if (!expression) {
- throw new IllegalStateException();
- }
+ /** Notifies the listener of entries that were evicted. */
+ void notifyListener() {
+ Node<K, V> node;
+ while ((node = pendingNotifications.poll()) != null) {
+ listener.onEviction(node.key, node.getValue());
}
+ }
- /* ---------------- Eviction Support -------------- */
+ /** Adds the node to the page replacement policy. */
+ final class AddTask implements Runnable {
+ final Node<K, V> node;
+ final int weight;
- /**
- * Retrieves the maximum weighted capacity of the map.
- *
- * @return the maximum weighted capacity
- */
- public long capacity() {
- return capacity.get();
+ AddTask(Node<K, V> node, int weight) {
+ this.weight = weight;
+ this.node = node;
}
- /**
- * Sets the maximum weighted capacity of the map and eagerly evicts entries
- * until it shrinks to the appropriate size.
- *
- * @param capacity the maximum weighted capacity of the map
- * @throws IllegalArgumentException if the capacity is negative
- */
- public void setCapacity(long capacity) {
- checkArgument(capacity >= 0);
- evictionLock.lock();
- try {
- this.capacity.lazySet(Math.min(capacity, MAXIMUM_CAPACITY));
- drainBuffers();
- evict();
- } finally {
- evictionLock.unlock();
- }
- notifyListener();
+ @Override
+ @GuardedBy("evictionLock")
+ public void run() {
+ weightedSize.lazySet(weightedSize.get() + weight);
+
+ // ignore out-of-order write operations
+ if (node.get().isAlive()) {
+ evictionDeque.add(node);
+ evict();
+ }
}
+ }
- /**
- * Determines whether the map has exceeded its capacity.
- */
- // @GuardedBy("evictionLock")
- boolean hasOverflowed() {
- return weightedSize.get() > capacity.get();
+ /** Removes a node from the page replacement policy. */
+ final class RemovalTask implements Runnable {
+ final Node<K, V> node;
+
+ RemovalTask(Node<K, V> node) {
+ this.node = node;
}
- /**
- * Evicts entries from the map while it exceeds the capacity and appends
- * evicted entries to the notification queue for processing.
- */
- // @GuardedBy("evictionLock")
- void evict() {
- // Attempts to evict entries from the map if it exceeds the maximum
- // capacity. If the eviction fails due to a concurrent removal of the
- // victim, that removal may cancel out the addition that triggered this
- // eviction. The victim is eagerly unlinked before the removal task so
- // that if an eviction is still required then a new victim will be chosen
- // for removal.
- while (hasOverflowed()) {
- final Node<K, V> node = evictionDeque.poll();
-
- // If weighted values are used, then the pending operations will adjust
- // the size to reflect the correct weight
- if (node == null) {
- return;
- }
-
- // Notify the listener only if the entry was evicted
- if (data.remove(node.key, node)) {
- pendingNotifications.add(node);
- }
-
- makeDead(node);
- }
+ @Override
+ @GuardedBy("evictionLock")
+ public void run() {
+ // add may not have been processed yet
+ evictionDeque.remove(node);
+ makeDead(node);
}
+ }
- /**
- * Performs the post-processing work required after a read.
- *
- * @param node the entry in the page replacement policy
- */
- void afterRead(Node<K, V> node) {
- final int bufferIndex = readBufferIndex();
- final long writeCount = recordRead(bufferIndex, node);
- drainOnReadIfNeeded(bufferIndex, writeCount);
- notifyListener();
+ /** Updates the weighted size and evicts an entry on overflow. */
+ final class UpdateTask implements Runnable {
+ final int weightDifference;
+ final Node<K, V> node;
+
+ public UpdateTask(Node<K, V> node, int weightDifference) {
+ this.weightDifference = weightDifference;
+ this.node = node;
}
- /**
- * Returns the index to the read buffer to record into.
- */
- static int readBufferIndex() {
- // A buffer is chosen by the thread's id so that tasks are distributed in a
- // pseudo evenly manner. This helps avoid hot entries causing contention
- // due to other threads trying to append to the same buffer.
- return ((int) Thread.currentThread().getId()) & READ_BUFFERS_MASK;
+ @Override
+ @GuardedBy("evictionLock")
+ public void run() {
+ weightedSize.lazySet(weightedSize.get() + weightDifference);
+ applyRead(node);
+ evict();
}
+ }
- /**
- * Records a read in the buffer and return its write count.
- *
- * @param bufferIndex the index to the chosen read buffer
- * @param node the entry in the page replacement policy
- * @return the number of writes on the chosen read buffer
- */
- long recordRead(int bufferIndex, Node<K, V> node) {
- // The location in the buffer is chosen in a racy fashion as the increment
- // is not atomic with the insertion. This means that concurrent reads can
- // overlap and overwrite one another, resulting in a lossy buffer.
- final AtomicLong counter = readBufferWriteCount[bufferIndex];
- final long writeCount = counter.get();
- counter.lazySet(writeCount + 1);
+ /* ---------------- Concurrent Map Support -------------- */
- final int index = (int) (writeCount & READ_BUFFER_INDEX_MASK);
- readBuffers[bufferIndex][index].lazySet(node);
+ @Override
+ public boolean isEmpty() {
+ return data.isEmpty();
+ }
- return writeCount;
- }
+ @Override
+ public int size() {
+ return data.size();
+ }
- /**
- * Attempts to drain the buffers if it is determined to be needed when
- * post-processing a read.
- *
- * @param bufferIndex the index to the chosen read buffer
- * @param writeCount the number of writes on the chosen read buffer
- */
- void drainOnReadIfNeeded(int bufferIndex, long writeCount) {
- final long pending = (writeCount - readBufferDrainAtWriteCount[bufferIndex].get());
- final boolean delayable = (pending < READ_BUFFER_THRESHOLD);
- final DrainStatus status = drainStatus.get();
- if (status.shouldDrainBuffers(delayable)) {
- tryToDrainBuffers();
- }
- }
+ /**
+ * Returns the weighted size of this map.
+ *
+ * @return the combined weight of the values in this map
+ */
+ public long weightedSize() {
+ return Math.max(0, weightedSize.get());
+ }
+
+ @Override
+ public void clear() {
+ evictionLock.lock();
+ try {
+ // Discard all entries
+ Node<K, V> node;
+ while ((node = evictionDeque.poll()) != null) {
+ data.remove(node.key, node);
+ makeDead(node);
+ }
+
+ // Discard all pending reads
+ for (AtomicReference<Node<K, V>>[] buffer : readBuffers) {
+ for (AtomicReference<Node<K, V>> slot : buffer) {
+ slot.lazySet(null);
+ }
+ }
+
+ // Apply all pending writes
+ Runnable task;
+ while ((task = writeBuffer.poll()) != null) {
+ task.run();
+ }
+ } finally {
+ evictionLock.unlock();
+ }
+ }
+
+ @Override
+ public boolean containsKey(Object key) {
+ return data.containsKey(key);
+ }
+
+ @Override
+ public boolean containsValue(Object value) {
+ checkNotNull(value);
+
+ for (Node<K, V> node : data.values()) {
+ if (node.getValue().equals(value)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public V get(Object key) {
+ final Node<K, V> node = data.get(key);
+ if (node == null) {
+ return null;
+ }
+ afterRead(node);
+ return node.getValue();
+ }
+
+ /**
+ * Returns the value to which the specified key is mapped, or {@code null}
+ * if this map contains no mapping for the key. This method differs from
+ * {@link #get(Object)} in that it does not record the operation with the
+ * page replacement policy.
+ *
+ * @param key the key whose associated value is to be returned
+ * @return the value to which the specified key is mapped, or
+ * {@code null} if this map contains no mapping for the key
+ * @throws NullPointerException if the specified key is null
+ */
+ public V getQuietly(Object key) {
+ final Node<K, V> node = data.get(key);
+ return (node == null) ? null : node.getValue();
+ }
+
+ @Override
+ public V put(K key, V value) {
+ return put(key, value, false);
+ }
+
+ @Override
+ public V putIfAbsent(K key, V value) {
+ return put(key, value, true);
+ }
+
+ /**
+ * Adds a node to the list and the data store. If an existing node is found,
+ * then its value is updated if allowed.
+ *
+ * @param key key with which the specified value is to be associated
+ * @param value value to be associated with the specified key
+ * @param onlyIfAbsent a write is performed only if the key is not already
+ * associated with a value
+ * @return the prior value in the data store or null if no mapping was found
+ */
+ V put(K key, V value, boolean onlyIfAbsent) {
+ checkNotNull(key);
+ checkNotNull(value);
+
+ final int weight = weigher.weightOf(key, value);
+ final WeightedValue<V> weightedValue = new WeightedValue<V>(value, weight);
+ final Node<K, V> node = new Node<K, V>(key, weightedValue);
+
+ for (;;) {
+ final Node<K, V> prior = data.putIfAbsent(node.key, node);
+ if (prior == null) {
+ afterWrite(new AddTask(node, weight));
+ return null;
+ } else if (onlyIfAbsent) {
+ afterRead(prior);
+ return prior.getValue();
+ }
+ for (;;) {
+ final WeightedValue<V> oldWeightedValue = prior.get();
+ if (!oldWeightedValue.isAlive()) {
+ break;
+ }
+
+ if (prior.compareAndSet(oldWeightedValue, weightedValue)) {
+ final int weightedDifference = weight - oldWeightedValue.weight;
+ if (weightedDifference == 0) {
+ afterRead(prior);
+ } else {
+ afterWrite(new UpdateTask(prior, weightedDifference));
+ }
+ return oldWeightedValue.value;
+ }
+ }
+ }
+ }
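In short, put replaces unconditionally (a write), while putIfAbsent treats an existing mapping as a cache hit (a read). A usage sketch, reusing the hypothetical cache from the Builder sketch above:

    byte[] v1 = {1}, v2 = {2, 3};
    cache.put("k", v1);                         // new mapping: schedules an AddTask
    byte[] prior = cache.putIfAbsent("k", v2);  // hit: returns v1, keeps v1, records a read
    cache.put("k", v2);                         // update: CAS of the WeightedValue, then
                                                // an UpdateTask only if the weight changed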
+
+ @Override
+ public V remove(Object key) {
+ final Node<K, V> node = data.remove(key);
+ if (node == null) {
+ return null;
+ }
+
+ makeRetired(node);
+ afterWrite(new RemovalTask(node));
+ return node.getValue();
+ }
+
+ @Override
+ public boolean remove(Object key, Object value) {
+ final Node<K, V> node = data.get(key);
+ if ((node == null) || (value == null)) {
+ return false;
+ }
+
+ WeightedValue<V> weightedValue = node.get();
+ for (;;) {
+ if (weightedValue.contains(value)) {
+ if (tryToRetire(node, weightedValue)) {
+ if (data.remove(key, node)) {
+ afterWrite(new RemovalTask(node));
+ return true;
+ }
+ } else {
+ weightedValue = node.get();
+ if (weightedValue.isAlive()) {
+ // retry as an intermediate update may have replaced the value with
+ // an equal instance that has a different reference identity
+ continue;
+ }
+ }
+ }
+ return false;
+ }
+ }
+
+ @Override
+ public V replace(K key, V value) {
+ checkNotNull(key);
+ checkNotNull(value);
+
+ final int weight = weigher.weightOf(key, value);
+ final WeightedValue<V> weightedValue = new WeightedValue<V>(value, weight);
+
+ final Node<K, V> node = data.get(key);
+ if (node == null) {
+ return null;
+ }
+ for (;;) {
+ final WeightedValue<V> oldWeightedValue = node.get();
+ if (!oldWeightedValue.isAlive()) {
+ return null;
+ }
+ if (node.compareAndSet(oldWeightedValue, weightedValue)) {
+ final int weightedDifference = weight - oldWeightedValue.weight;
+ if (weightedDifference == 0) {
+ afterRead(node);
+ } else {
+ afterWrite(new UpdateTask(node, weightedDifference));
+ }
+ return oldWeightedValue.value;
+ }
+ }
+ }
+
+ @Override
+ public boolean replace(K key, V oldValue, V newValue) {
+ checkNotNull(key);
+ checkNotNull(oldValue);
+ checkNotNull(newValue);
+
+ final int weight = weigher.weightOf(key, newValue);
+ final WeightedValue<V> newWeightedValue = new WeightedValue<V>(newValue, weight);
+
+ final Node<K, V> node = data.get(key);
+ if (node == null) {
+ return false;
+ }
+ for (;;) {
+ final WeightedValue<V> weightedValue = node.get();
+ if (!weightedValue.isAlive() || !weightedValue.contains(oldValue)) {
+ return false;
+ }
+ if (node.compareAndSet(weightedValue, newWeightedValue)) {
+ final int weightedDifference = weight - weightedValue.weight;
+ if (weightedDifference == 0) {
+ afterRead(node);
+ } else {
+ afterWrite(new UpdateTask(node, weightedDifference));
+ }
+ return true;
+ }
+ }
+ }
+
+ @Override
+ public Set<K> keySet() {
+ final Set<K> ks = keySet;
+ return (ks == null) ? (keySet = new KeySet()) : ks;
+ }
+
+ /**
+ * Returns an unmodifiable snapshot {@link Set} view of the keys contained in
+ * this map. The set's iterator returns the keys whose order of iteration is
+ * the ascending order in which its entries are considered eligible for
+ * retention, from the least-likely to be retained to the most-likely.
+ * <p>
+ * Beware that, unlike in {@link #keySet()}, obtaining the set is <em>NOT</em>
+ * a constant-time operation. Because of the asynchronous nature of the page
+ * replacement policy, determining the retention ordering requires a traversal
+ * of the keys.
+ *
+ * @return an ascending snapshot view of the keys in this map
+ */
+ public Set<K> ascendingKeySet() {
+ return ascendingKeySetWithLimit(Integer.MAX_VALUE);
+ }
+
+ /**
+ * Returns an unmodifiable snapshot {@link Set} view of the keys contained in
+ * this map. The set's iterator returns the keys whose order of iteration is
+ * the ascending order in which its entries are considered eligible for
+ * retention, from the least-likely to be retained to the most-likely.
+ * <p>
+ * Beware that, unlike in {@link #keySet()}, obtaining the set is <em>NOT</em>
+ * a constant-time operation. Because of the asynchronous nature of the page
+ * replacement policy, determining the retention ordering requires a traversal
+ * of the keys.
+ *
+ * @param limit the maximum size of the returned set
+ * @return an ascending snapshot view of the keys in this map
+ * @throws IllegalArgumentException if the limit is negative
+ */
+ public Set<K> ascendingKeySetWithLimit(int limit) {
+ return orderedKeySet(true, limit);
+ }
+
+ /**
+ * Returns an unmodifiable snapshot {@link Set} view of the keys contained in
+ * this map. The set's iterator returns the keys whose order of iteration is
+ * the descending order in which its entries are considered eligible for
+ * retention, from the most-likely to be retained to the least-likely.
+ * <p>
+ * Beware that, unlike in {@link #keySet()}, obtaining the set is <em>NOT</em>
+ * a constant-time operation. Because of the asynchronous nature of the page
+ * replacement policy, determining the retention ordering requires a traversal
+ * of the keys.
+ *
+ * @return a descending snapshot view of the keys in this map
+ */
+ public Set<K> descendingKeySet() {
+ return descendingKeySetWithLimit(Integer.MAX_VALUE);
+ }
+
+ /**
+ * Returns an unmodifiable snapshot {@link Set} view of the keys contained in
+ * this map. The set's iterator returns the keys whose order of iteration is
+ * the descending order in which its entries are considered eligible for
+ * retention, from the most-likely to be retained to the least-likely.
+ * <p>
+ * Beware that, unlike in {@link #keySet()}, obtaining the set is <em>NOT</em>
+ * a constant-time operation. Because of the asynchronous nature of the page
+ * replacement policy, determining the retention ordering requires a traversal
+ * of the keys.
+ *
+ * @param limit the maximum size of the returned set
+ * @return a descending snapshot view of the keys in this map
+ * @throws IllegalArgumentException if the limit is negative
+ */
+ public Set<K> descendingKeySetWithLimit(int limit) {
+ return orderedKeySet(false, limit);
+ }
+
+ Set<K> orderedKeySet(boolean ascending, int limit) {
+ checkArgument(limit >= 0);
+ evictionLock.lock();
+ try {
+ drainBuffers();
+
+ final int initialCapacity = (weigher == Weighers.entrySingleton())
+ ? Math.min(limit, (int) weightedSize())
+ : 16;
+ final Set<K> keys = new LinkedHashSet<K>(initialCapacity);
+ final Iterator<Node<K, V>> iterator = ascending
+ ? evictionDeque.iterator()
+ : evictionDeque.descendingIterator();
+ while (iterator.hasNext() && (limit > keys.size())) {
+ keys.add(iterator.next().key);
+ }
+ return unmodifiableSet(keys);
+ } finally {
+ evictionLock.unlock();
+ }
+ }
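A short usage note: these ordered views are O(n) snapshots, not live views; each call acquires the eviction lock and drains the buffers before walking the deque. For example, with the hypothetical cache above:

    // the first keys returned are the next eviction victims
    Set<String> coldest = cache.ascendingKeySetWithLimit(10);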
+
+ @Override
+ public Collection<V> values() {
+ final Collection<V> vs = values;
+ return (vs == null) ? (values = new Values()) : vs;
+ }
+
+ @Override
+ public Set<Entry<K, V>> entrySet() {
+ final Set<Entry<K, V>> es = entrySet;
+ return (es == null) ? (entrySet = new EntrySet()) : es;
+ }
+
+ /**
+ * Returns an unmodifiable snapshot {@link Map} view of the mappings contained
+ * in this map. The map's collections return the mappings whose order of
+ * iteration is the ascending order in which its entries are considered
+ * eligible for retention, from the least-likely to be retained to the
+ * most-likely.
+ * <p>
+ * Beware that obtaining the mappings is <em>NOT</em> a constant-time
+ * operation. Because of the asynchronous nature of the page replacement
+ * policy, determining the retention ordering requires a traversal of the
+ * entries.
+ *
+ * @return an ascending snapshot view of this map
+ */
+ public Map<K, V> ascendingMap() {
+ return ascendingMapWithLimit(Integer.MAX_VALUE);
+ }
+
+ /**
+ * Returns an unmodifiable snapshot {@link Map} view of the mappings contained
+ * in this map. The map's collections return the mappings whose order of
+ * iteration is the ascending order in which its entries are considered
+ * eligible for retention, from the least-likely to be retained to the
+ * most-likely.
+ * <p>
+ * Beware that obtaining the mappings is <em>NOT</em> a constant-time
+ * operation. Because of the asynchronous nature of the page replacement
+ * policy, determining the retention ordering requires a traversal of the
+ * entries.
+ *
+ * @param limit the maximum size of the returned map
+ * @return an ascending snapshot view of this map
+ * @throws IllegalArgumentException if the limit is negative
+ */
+ public Map<K, V> ascendingMapWithLimit(int limit) {
+ return orderedMap(true, limit);
+ }
+
+ /**
+ * Returns an unmodifiable snapshot {@link Map} view of the mappings contained
+ * in this map. The map's collections return the mappings whose order of
+ * iteration is the descending order in which its entries are considered
+ * eligible for retention, from the most-likely to be retained to the
+ * least-likely.
+ * <p>
+ * Beware that obtaining the mappings is <em>NOT</em> a constant-time
+ * operation. Because of the asynchronous nature of the page replacement
+ * policy, determining the retention ordering requires a traversal of the
+ * entries.
+ *
+ * @return a descending snapshot view of this map
+ */
+ public Map<K, V> descendingMap() {
+ return descendingMapWithLimit(Integer.MAX_VALUE);
+ }
+
+ /**
+ * Returns an unmodifiable snapshot {@link Map} view of the mappings contained
+ * in this map. The map's collections return the mappings whose order of
+ * iteration is the descending order in which its entries are considered
+ * eligible for retention, from the most-likely to be retained to the
+ * least-likely.
+ * <p>
+ * Beware that obtaining the mappings is <em>NOT</em> a constant-time
+ * operation. Because of the asynchronous nature of the page replacement
+ * policy, determining the retention ordering requires a traversal of the
+ * entries.
+ *
+ * @param limit the maximum size of the returned map
+ * @return a descending snapshot view of this map
+ * @throws IllegalArgumentException if the limit is negative
+ */
+ public Map<K, V> descendingMapWithLimit(int limit) {
+ return orderedMap(false, limit);
+ }
+
+ Map<K, V> orderedMap(boolean ascending, int limit) {
+ checkArgument(limit >= 0);
+ evictionLock.lock();
+ try {
+ drainBuffers();
+
+ final int initialCapacity = (weigher == Weighers.entrySingleton())
+ ? Math.min(limit, (int) weightedSize())
+ : 16;
+ final Map<K, V> map = new LinkedHashMap<K, V>(initialCapacity);
+ final Iterator<Node<K, V>> iterator = ascending
+ ? evictionDeque.iterator()
+ : evictionDeque.descendingIterator();
+ while (iterator.hasNext() && (limit > map.size())) {
+ Node<K, V> node = iterator.next();
+ map.put(node.key, node.getValue());
+ }
+ return unmodifiableMap(map);
+ } finally {
+ evictionLock.unlock();
+ }
+ }
+
+ /** The draining status of the buffers. */
+ enum DrainStatus {
+
+ /** A drain is not taking place. */
+ IDLE {
+ @Override boolean shouldDrainBuffers(boolean delayable) {
+ return !delayable;
+ }
+ },
+
+ /** A drain is required due to a pending write modification. */
+ REQUIRED {
+ @Override boolean shouldDrainBuffers(boolean delayable) {
+ return true;
+ }
+ },
+
+ /** A drain is in progress. */
+ PROCESSING {
+ @Override boolean shouldDrainBuffers(boolean delayable) {
+ return false;
+ }
+ };
/**
- * Performs the post-processing work required after a write.
+ * Determines whether the buffers should be drained.
*
- * @param task the pending operation to be applied
+ * @param delayable whether a drain can be delayed until required
+ * @return whether a drain should be attempted
*/
- void afterWrite(Runnable task) {
- writeBuffer.add(task);
- drainStatus.lazySet(REQUIRED);
- tryToDrainBuffers();
- notifyListener();
- }
+ abstract boolean shouldDrainBuffers(boolean delayable);
+ }
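Summarizing the state machine above, shouldDrainBuffers resolves as follows (delayable means the pending reads on the chosen buffer are still below READ_BUFFER_THRESHOLD):

    state        delayable = true   delayable = false
    IDLE         false              true
    REQUIRED     true               true
    PROCESSING   false              false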
- /**
- * Attempts to acquire the eviction lock and apply the pending operations, up
- * to the amortized threshold, to the page replacement policy.
- */
- void tryToDrainBuffers() {
- if (evictionLock.tryLock()) {
- try {
- drainStatus.lazySet(PROCESSING);
- drainBuffers();
- } finally {
- drainStatus.compareAndSet(PROCESSING, IDLE);
- evictionLock.unlock();
- }
- }
- }
+ /** A value, its weight, and the entry's status. */
+ @Immutable
+ static final class WeightedValue<V> {
+ final int weight;
+ final V value;
- /**
- * Drains the read and write buffers up to an amortized threshold.
- */
- // @GuardedBy("evictionLock")
- void drainBuffers() {
- drainReadBuffers();
- drainWriteBuffer();
+ WeightedValue(V value, int weight) {
+ this.weight = weight;
+ this.value = value;
}
- /**
- * Drains the read buffers, each up to an amortized threshold.
- */
- // @GuardedBy("evictionLock")
- void drainReadBuffers() {
- final int start = (int) Thread.currentThread().getId();
- final int end = start + NUMBER_OF_READ_BUFFERS;
- for (int i = start; i < end; i++) {
- drainReadBuffer(i & READ_BUFFERS_MASK);
- }
+ boolean contains(Object o) {
+ return (o == value) || value.equals(o);
}
/**
- * Drains the read buffer up to an amortized threshold.
+ * Whether the entry is available in the hash-table and page replacement policy.
*/
- // @GuardedBy("evictionLock")
- void drainReadBuffer(int bufferIndex) {
- final long writeCount = readBufferWriteCount[bufferIndex].get();
- for (int i = 0; i < READ_BUFFER_DRAIN_THRESHOLD; i++) {
- final int index = (int) (readBufferReadCount[bufferIndex] & READ_BUFFER_INDEX_MASK);
- final AtomicReference<Node<K, V>> slot = readBuffers[bufferIndex][index];
- final Node<K, V> node = slot.get();
- if (node == null) {
- break;
- }
-
- slot.lazySet(null);
- applyRead(node);
- readBufferReadCount[bufferIndex]++;
- }
- readBufferDrainAtWriteCount[bufferIndex].lazySet(writeCount);
+ boolean isAlive() {
+ return weight > 0;
}
/**
- * Updates the node's location in the page replacement policy.
+ * Whether the entry was removed from the hash-table and is awaiting removal from
+ * the page replacement policy.
*/
- // @GuardedBy("evictionLock")
- void applyRead(Node<K, V> node) {
- // An entry may be scheduled for reordering despite having been removed.
- // This can occur when the entry was concurrently read while a writer was
- // removing it. If the entry is no longer linked then it does not need to
- // be processed.
- if (evictionDeque.contains(node)) {
- evictionDeque.moveToBack(node);
- }
+ boolean isRetired() {
+ return weight < 0;
}
/**
- * Drains the read buffer up to an amortized threshold.
+ * Whether the entry was removed from the hash-table and the page replacement
+ * policy.
*/
- // @GuardedBy("evictionLock")
- void drainWriteBuffer() {
- for (int i = 0; i < WRITE_BUFFER_DRAIN_THRESHOLD; i++) {
- final Runnable task = writeBuffer.poll();
- if (task == null) {
- break;
- }
- task.run();
- }
+ boolean isDead() {
+ return weight == 0;
}
+ }
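The sign of the weight encodes the entry's lifecycle, which is why tryToRetire and makeRetired negate it and makeDead zeroes it:

    weight > 0   alive:   in the hash table and the page replacement policy
    weight < 0   retired: removed from the table, awaiting unlink from the policy
    weight == 0  dead:    removed from both, with weightedSize already adjusted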
- /**
- * Attempts to transition the node from the <tt>alive</tt> state to the
- * <tt>retired</tt> state.
- *
- * @param node the entry in the page replacement policy
- * @param expect the expected weighted value
- * @return if successful
- */
- boolean tryToRetire(Node<K, V> node, WeightedValue<V> expect) {
- if (expect.isAlive()) {
- final WeightedValue<V> retired = new WeightedValue<V>(expect.value, -expect.weight);
- return node.compareAndSet(expect, retired);
- }
- return false;
- }
+ /**
+ * A node contains the key, the weighted value, and the linkage pointers on
+ * the page-replacement algorithm's data structures.
+ */
+ @SuppressWarnings("serial")
+ static final class Node<K, V> extends AtomicReference<WeightedValue<V>>
+ implements Linked<Node<K, V>> {
+ final K key;
+ @GuardedBy("evictionLock")
+ Node<K, V> prev;
+ @GuardedBy("evictionLock")
+ Node<K, V> next;
- /**
- * Atomically transitions the node from the <tt>alive</tt> state to the
- * <tt>retired</tt> state, if a valid transition.
- *
- * @param node the entry in the page replacement policy
- */
- void makeRetired(Node<K, V> node) {
- for (; ; ) {
- final WeightedValue<V> current = node.get();
- if (!current.isAlive()) {
- return;
- }
- final WeightedValue<V> retired = new WeightedValue<V>(current.value, -current.weight);
- if (node.compareAndSet(current, retired)) {
- return;
- }
- }
+ /** Creates a new, unlinked node. */
+ Node(K key, WeightedValue<V> weightedValue) {
+ super(weightedValue);
+ this.key = key;
}
- /**
- * Atomically transitions the node to the <tt>dead</tt> state and decrements
- * the <tt>weightedSize</tt>.
- *
- * @param node the entry in the page replacement policy
- */
- // @GuardedBy("evictionLock")
- void makeDead(Node<K, V> node) {
- for (; ; ) {
- WeightedValue<V> current = node.get();
- WeightedValue<V> dead = new WeightedValue<V>(current.value, 0);
- if (node.compareAndSet(current, dead)) {
- weightedSize.lazySet(weightedSize.get() - Math.abs(current.weight));
- return;
- }
- }
+ @Override
+ @GuardedBy("evictionLock")
+ public Node<K, V> getPrevious() {
+ return prev;
}
- /**
- * Notifies the listener of entries that were evicted.
- */
- void notifyListener() {
- Node<K, V> node;
- while ((node = pendingNotifications.poll()) != null) {
- listener.onEviction(node.key, node.getValue());
- }
+ @Override
+ @GuardedBy("evictionLock")
+ public void setPrevious(Node<K, V> prev) {
+ this.prev = prev;
}
- /**
- * Adds the node to the page replacement policy.
- */
- final class AddTask implements Runnable {
- final Node<K, V> node;
- final int weight;
-
- AddTask(Node<K, V> node, int weight) {
- this.weight = weight;
- this.node = node;
- }
-
- @Override
- // @GuardedBy("evictionLock")
- public void run() {
- weightedSize.lazySet(weightedSize.get() + weight);
-
- // ignore out-of-order write operations
- if (node.get().isAlive()) {
- evictionDeque.add(node);
- evict();
- }
- }
+ @Override
+ @GuardedBy("evictionLock")
+ public Node<K, V> getNext() {
+ return next;
}
- /**
- * Removes a node from the page replacement policy.
- */
- final class RemovalTask implements Runnable {
- final Node<K, V> node;
-
- RemovalTask(Node<K, V> node) {
- this.node = node;
- }
-
- @Override
- // @GuardedBy("evictionLock")
- public void run() {
- // add may not have been processed yet
- evictionDeque.remove(node);
- makeDead(node);
- }
+ @Override
+ @GuardedBy("evictionLock")
+ public void setNext(Node<K, V> next) {
+ this.next = next;
}
- /**
- * Updates the weighted size and evicts an entry on overflow.
- */
- final class UpdateTask implements Runnable {
- final int weightDifference;
- final Node<K, V> node;
-
- public UpdateTask(Node<K, V> node, int weightDifference) {
- this.weightDifference = weightDifference;
- this.node = node;
- }
-
- @Override
- // @GuardedBy("evictionLock")
- public void run() {
- weightedSize.lazySet(weightedSize.get() + weightDifference);
- applyRead(node);
- evict();
- }
+ /** Retrieves the value held by the current <tt>WeightedValue</tt>. */
+ V getValue() {
+ return get().value;
}
+ }
- /* ---------------- Concurrent Map Support -------------- */
+ /** An adapter to safely externalize the keys. */
+ final class KeySet extends AbstractSet<K> {
+ final ConcurrentLinkedHashMap<K, V> map = ConcurrentLinkedHashMap.this;
@Override
- public boolean isEmpty() {
- return data.isEmpty();
+ public int size() {
+ return map.size();
}
@Override
- public int size() {
- return data.size();
+ public void clear() {
+ map.clear();
}
- /**
- * Returns the weighted size of this map.
- *
- * @return the combined weight of the values in this map
- */
- public long weightedSize() {
- return Math.max(0, weightedSize.get());
+ @Override
+ public Iterator<K> iterator() {
+ return new KeyIterator();
}
@Override
- public void clear() {
- evictionLock.lock();
- try {
- // Discard all entries
- Node<K, V> node;
- while ((node = evictionDeque.poll()) != null) {
- data.remove(node.key, node);
- makeDead(node);
- }
-
- // Discard all pending reads
- for (AtomicReference<Node<K, V>>[] buffer : readBuffers) {
- for (AtomicReference<Node<K, V>> slot : buffer) {
- slot.lazySet(null);
- }
- }
-
- // Apply all pending writes
- Runnable task;
- while ((task = writeBuffer.poll()) != null) {
- task.run();
- }
- } finally {
- evictionLock.unlock();
- }
+ public boolean contains(Object obj) {
+ return containsKey(obj);
}
@Override
- public boolean containsKey(Object key) {
- return data.containsKey(key);
+ public boolean remove(Object obj) {
+ return (map.remove(obj) != null);
}
@Override
- public boolean containsValue(Object value) {
- checkNotNull(value);
-
- for (Node<K, V> node : data.values()) {
- if (node.getValue().equals(value)) {
- return true;
- }
- }
- return false;
+ public Object[] toArray() {
+ return map.data.keySet().toArray();
}
@Override
- public V get(Object key) {
- final Node<K, V> node = data.get(key);
- if (node == null) {
- return null;
- }
- afterRead(node);
- return node.getValue();
+ public <T> T[] toArray(T[] array) {
+ return map.data.keySet().toArray(array);
}
+ }
- /**
- * Returns the value to which the specified key is mapped, or {@code null}
- * if this map contains no mapping for the key. This method differs from
- * {@link #get(Object)} in that it does not record the operation with the
- * page replacement policy.
- *
- * @param key the key whose associated value is to be returned
- * @return the value to which the specified key is mapped, or
- * {@code null} if this map contains no mapping for the key
- * @throws NullPointerException if the specified key is null
- */
- public V getQuietly(Object key) {
- final Node<K, V> node = data.get(key);
- return (node == null) ? null : node.getValue();
- }
+ /** An adapter to safely externalize the key iterator. */
+ final class KeyIterator implements Iterator<K> {
+ final Iterator<K> iterator = data.keySet().iterator();
+ K current;
@Override
- public V put(K key, V value) {
- return put(key, value, false);
+ public boolean hasNext() {
+ return iterator.hasNext();
}
@Override
- public V putIfAbsent(K key, V value) {
- return put(key, value, true);
+ public K next() {
+ current = iterator.next();
+ return current;
}
- /**
- * Adds a node to the list and the data store. If an existing node is found,
- * then its value is updated if allowed.
- *
- * @param key key with which the specified value is to be associated
- * @param value value to be associated with the specified key
- * @param onlyIfAbsent a write is performed only if the key is not already
- * associated with a value
- * @return the prior value in the data store or null if no mapping was found
- */
- V put(K key, V value, boolean onlyIfAbsent) {
- checkNotNull(key);
- checkNotNull(value);
-
- final int weight = weigher.weightOf(key, value);
- final WeightedValue<V> weightedValue = new WeightedValue<V>(value, weight);
- final Node<K, V> node = new Node<K, V>(key, weightedValue);
-
- for (; ; ) {
- final Node<K, V> prior = data.putIfAbsent(node.key, node);
- if (prior == null) {
- afterWrite(new AddTask(node, weight));
- return null;
- } else if (onlyIfAbsent) {
- afterRead(prior);
- return prior.getValue();
- }
- for (; ; ) {
- final WeightedValue<V> oldWeightedValue = prior.get();
- if (!oldWeightedValue.isAlive()) {
- break;
- }
-
- if (prior.compareAndSet(oldWeightedValue, weightedValue)) {
- final int weightedDifference = weight - oldWeightedValue.weight;
- if (weightedDifference == 0) {
- afterRead(prior);
- } else {
- afterWrite(new UpdateTask(prior, weightedDifference));
- }
- return oldWeightedValue.value;
- }
- }
- }
+ @Override
+ public void remove() {
+ checkState(current != null);
+ ConcurrentLinkedHashMap.this.remove(current);
+ current = null;
}
+ }
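
A short usage sketch for the key view and iterator above: both delegate to the
backing map, so Iterator.remove writes through to the cache. The cache and its
contents are hypothetical; the builder calls are the map's public API.

    import java.util.Iterator;
    import org.apache.groovy.util.concurrentlinkedhashmap.ConcurrentLinkedHashMap;
    import org.apache.groovy.util.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder;

    class KeyViewDemo {
        public static void main(String[] args) {
            ConcurrentLinkedHashMap<String, Integer> cache =
                new Builder<String, Integer>().maximumWeightedCapacity(100).build();
            cache.put("a", 1);
            cache.put("b", 2);

            Iterator<String> keys = cache.keySet().iterator();
            while (keys.hasNext()) {
                if ("a".equals(keys.next())) {
                    keys.remove(); // writes through: the "a" mapping leaves the cache
                }
            }
            System.out.println(cache.containsKey("a")); // false
        }
    }
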
- @Override
- public V remove(Object key) {
- final Node<K, V> node = data.remove(key);
- if (node == null) {
- return null;
- }
+ /** An adapter to safely externalize the values. */
+ final class Values extends AbstractCollection<V> {
- makeRetired(node);
- afterWrite(new RemovalTask(node));
- return node.getValue();
+ @Override
+ public int size() {
+ return ConcurrentLinkedHashMap.this.size();
}
@Override
- public boolean remove(Object key, Object value) {
- final Node<K, V> node = data.get(key);
- if ((node == null) || (value == null)) {
- return false;
- }
-
- WeightedValue<V> weightedValue = node.get();
- for (; ; ) {
- if (weightedValue.contains(value)) {
- if (tryToRetire(node, weightedValue)) {
- if (data.remove(key, node)) {
- afterWrite(new RemovalTask(node));
- return true;
- }
- } else {
- weightedValue = node.get();
- if (weightedValue.isAlive()) {
- // retry as an intermediate update may have replaced the value with
- // an equal instance that has a different reference identity
- continue;
- }
- }
- }
- return false;
- }
+ public void clear() {
+ ConcurrentLinkedHashMap.this.clear();
}
@Override
- public V replace(K key, V value) {
- checkNotNull(key);
- checkNotNull(value);
-
- final int weight = weigher.weightOf(key, value);
- final WeightedValue<V> weightedValue = new WeightedValue<V>(value, weight);
-
- final Node<K, V> node = data.get(key);
- if (node == null) {
- return null;
- }
- for (; ; ) {
- final WeightedValue<V> oldWeightedValue = node.get();
- if (!oldWeightedValue.isAlive()) {
- return null;
- }
- if (node.compareAndSet(oldWeightedValue, weightedValue)) {
- final int weightedDifference = weight - oldWeightedValue.weight;
- if (weightedDifference == 0) {
- afterRead(node);
- } else {
- afterWrite(new UpdateTask(node, weightedDifference));
- }
- return oldWeightedValue.value;
- }
- }
+ public Iterator<V> iterator() {
+ return new ValueIterator();
}
@Override
- public boolean replace(K key, V oldValue, V newValue) {
- checkNotNull(key);
- checkNotNull(oldValue);
- checkNotNull(newValue);
+ public boolean contains(Object o) {
+ return containsValue(o);
+ }
+ }
- final int weight = weigher.weightOf(key, newValue);
- final WeightedValue<V> newWeightedValue = new WeightedValue<V>(newValue, weight);
+ /** An adapter to safely externalize the value iterator. */
+ final class ValueIterator implements Iterator<V> {
+ final Iterator<Node<K, V>> iterator = data.values().iterator();
+ Node<K, V> current;
- final Node<K, V> node = data.get(key);
- if (node == null) {
- return false;
- }
- for (; ; ) {
- final WeightedValue<V> weightedValue = node.get();
- if (!weightedValue.isAlive() || !weightedValue.contains(oldValue)) {
- return false;
- }
- if (node.compareAndSet(weightedValue, newWeightedValue)) {
- final int weightedDifference = weight - weightedValue.weight;
- if (weightedDifference == 0) {
- afterRead(node);
- } else {
- afterWrite(new UpdateTask(node, weightedDifference));
- }
- return true;
- }
- }
+ @Override
+ public boolean hasNext() {
+ return iterator.hasNext();
}
@Override
- public Set<K> keySet() {
- final Set<K> ks = keySet;
- return (ks == null) ? (keySet = new KeySet()) : ks;
+ public V next() {
+ current = iterator.next();
+ return current.getValue();
}
- /**
- * Returns an unmodifiable snapshot {@link Set} view of the keys contained in
- * this map. The set's iterator returns the keys whose order of iteration is
- * the ascending order in which its entries are considered eligible for
- * retention, from the least-likely to be retained to the most-likely.
- * <p>
- * Beware that, unlike in {@link #keySet()}, obtaining the set is <em>NOT</em>
- * a constant-time operation. Because of the asynchronous nature of the page
- * replacement policy, determining the retention ordering requires a traversal
- * of the keys.
- *
- * @return an ascending snapshot view of the keys in this map
- */
- public Set<K> ascendingKeySet() {
- return ascendingKeySetWithLimit(Integer.MAX_VALUE);
+ @Override
+ public void remove() {
+ checkState(current != null);
+ ConcurrentLinkedHashMap.this.remove(current.key);
+ current = null;
}
+ }
- /**
- * Returns an unmodifiable snapshot {@link Set} view of the keys contained in
- * this map. The set's iterator returns the keys whose order of iteration is
- * the ascending order in which its entries are considered eligible for
- * retention, from the least-likely to be retained to the most-likely.
- * <p>
- * Beware that, unlike in {@link #keySet()}, obtaining the set is <em>NOT</em>
- * a constant-time operation. Because of the asynchronous nature of the page
- * replacement policy, determining the retention ordering requires a traversal
- * of the keys.
- *
- * @param limit the maximum size of the returned set
- * @return an ascending snapshot view of the keys in this map
- * @throws IllegalArgumentException if the limit is negative
- */
- public Set<K> ascendingKeySetWithLimit(int limit) {
- return orderedKeySet(true, limit);
- }
+ /** An adapter to safely externalize the entries. */
+ final class EntrySet extends AbstractSet<Entry<K, V>> {
+ final ConcurrentLinkedHashMap<K, V> map = ConcurrentLinkedHashMap.this;
- /**
- * Returns an unmodifiable snapshot {@link Set} view of the keys contained in
- * this map. The set's iterator returns the keys whose order of iteration is
- * the descending order in which its entries are considered eligible for
- * retention, from the most-likely to be retained to the least-likely.
- * <p>
- * Beware that, unlike in {@link #keySet()}, obtaining the set is <em>NOT</em>
- * a constant-time operation. Because of the asynchronous nature of the page
- * replacement policy, determining the retention ordering requires a traversal
- * of the keys.
- *
- * @return a descending snapshot view of the keys in this map
- */
- public Set<K> descendingKeySet() {
- return descendingKeySetWithLimit(Integer.MAX_VALUE);
+ @Override
+ public int size() {
+ return map.size();
}
- /**
- * Returns an unmodifiable snapshot {@link Set} view of the keys contained in
- * this map. The set's iterator returns the keys whose order of iteration is
- * the descending order in which its entries are considered eligible for
- * retention, from the most-likely to be retained to the least-likely.
- * <p>
- * Beware that, unlike in {@link #keySet()}, obtaining the set is <em>NOT</em>
- * a constant-time operation. Because of the asynchronous nature of the page
- * replacement policy, determining the retention ordering requires a traversal
- * of the keys.
- *
- * @param limit the maximum size of the returned set
- * @return a descending snapshot view of the keys in this map
- * @throws IllegalArgumentException if the limit is negative
- */
- public Set<K> descendingKeySetWithLimit(int limit) {
- return orderedKeySet(false, limit);
- }
-
- Set<K> orderedKeySet(boolean ascending, int limit) {
- checkArgument(limit >= 0);
- evictionLock.lock();
- try {
- drainBuffers();
-
- final int initialCapacity = (weigher == Weighers.entrySingleton())
- ? Math.min(limit, (int) weightedSize())
- : 16;
- final Set<K> keys = new LinkedHashSet<K>(initialCapacity);
- final Iterator<Node<K, V>> iterator = ascending
- ? evictionDeque.iterator()
- : evictionDeque.descendingIterator();
- while (iterator.hasNext() && (limit > keys.size())) {
- keys.add(iterator.next().key);
- }
- return unmodifiableSet(keys);
- } finally {
- evictionLock.unlock();
- }
+ @Override
+ public void clear() {
+ map.clear();
}
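
To make the snapshot semantics of orderedKeySet concrete, a hedged example using
the public ascendingKeySetWithLimit method defined above: the buffers are drained
under the eviction lock and the deque is walked from least- to most-likely
retained, so the result is a detached, unmodifiable copy. The keys are
hypothetical.

    import java.util.Set;
    import org.apache.groovy.util.concurrentlinkedhashmap.ConcurrentLinkedHashMap;
    import org.apache.groovy.util.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder;

    class SnapshotDemo {
        public static void main(String[] args) {
            ConcurrentLinkedHashMap<String, Integer> cache =
                new Builder<String, Integer>().maximumWeightedCapacity(10).build();
            cache.put("a", 1);
            cache.put("b", 2);
            cache.put("c", 3);
            // The two keys least likely to be retained, coldest first.
            Set<String> coldest = cache.ascendingKeySetWithLimit(2);
            System.out.println(coldest); // [a, b]; later cache updates don't affect it
        }
    }
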
@Override
- public Collection<V> values() {
- final Collection<V> vs = values;
- return (vs == null) ? (values = new Values()) : vs;
+ public Iterator<Entry<K, V>> iterator() {
+ return new EntryIterator();
}
@Override
- public Set<Entry<K, V>> entrySet() {
- final Set<Entry<K, V>> es = entrySet;
- return (es == null) ? (entrySet = new EntrySet()) : es;
+ public boolean contains(Object obj) {
+ if (!(obj instanceof Entry<?, ?>)) {
+ return false;
+ }
+ Entry<?, ?> entry = (Entry<?, ?>) obj;
+ Node<K, V> node = map.data.get(entry.getKey());
+ return (node != null) && (node.getValue().equals(entry.getValue()));
}
- /**
- * Returns an unmodifiable snapshot {@link Map} view of the mappings contained
- * in this map. The map's collections return the mappings whose order of
- * iteration is the ascending order in which its entries are considered
- * eligible for retention, from the least-likely to be retained to the
- * most-likely.
- * <p>
- * Beware that obtaining the mappings is <em>NOT</em> a constant-time
- * operation. Because of the asynchronous nature of the page replacement
- * policy, determining the retention ordering requires a traversal of the
- * entries.
- *
- * @return an ascending snapshot view of this map
- */
- public Map<K, V> ascendingMap() {
- return ascendingMapWithLimit(Integer.MAX_VALUE);
+ @Override
+ public boolean add(Entry<K, V> entry) {
+ return (map.putIfAbsent(entry.getKey(), entry.getValue()) == null);
}
- /**
- * Returns an unmodifiable snapshot {@link Map} view of the mappings contained
- * in this map. The map's collections return the mappings whose order of
- * iteration is the ascending order in which its entries are considered
- * eligible for retention, from the least-likely to be retained to the
- * most-likely.
- * <p>
- * Beware that obtaining the mappings is <em>NOT</em> a constant-time
- * operation. Because of the asynchronous nature of the page replacement
- * policy, determining the retention ordering requires a traversal of the
- * entries.
- *
- * @param limit the maximum size of the returned map
- * @return an ascending snapshot view of this map
- * @throws IllegalArgumentException if the limit is negative
- */
- public Map<K, V> ascendingMapWithLimit(int limit) {
- return orderedMap(true, limit);
+ @Override
+ public boolean remove(Object obj) {
+ if (!(obj instanceof Entry<?, ?>)) {
+ return false;
+ }
+ Entry<?, ?> entry = (Entry<?, ?>) obj;
+ return map.remove(entry.getKey(), entry.getValue());
}
+ }
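
A brief sketch of the entry-view contract above: contains and remove match on the
full key-value pair, not on the key alone. The entries are hypothetical.

    import java.util.AbstractMap.SimpleEntry;
    import org.apache.groovy.util.concurrentlinkedhashmap.ConcurrentLinkedHashMap;
    import org.apache.groovy.util.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder;

    class EntryViewDemo {
        public static void main(String[] args) {
            ConcurrentLinkedHashMap<String, Integer> cache =
                new Builder<String, Integer>().maximumWeightedCapacity(10).build();
            cache.put("a", 1);
            // false on both: the value does not match the current mapping
            System.out.println(cache.entrySet().contains(new SimpleEntry<String, Integer>("a", 2)));
            System.out.println(cache.entrySet().remove(new SimpleEntry<String, Integer>("a", 2)));
            // true: key and value both match, so the mapping is removed
            System.out.println(cache.entrySet().remove(new SimpleEntry<String, Integer>("a", 1)));
        }
    }
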
- /**
- * Returns an unmodifiable snapshot {@link Map} view of the mappings contained
- * in this map. The map's collections return the mappings whose order of
- * iteration is the descending order in which its entries are considered
- * eligible for retention, from the most-likely to be retained to the
- * least-likely.
- * <p>
- * Beware that obtaining the mappings is <em>NOT</em> a constant-time
- * operation. Because of the asynchronous nature of the page replacement
- * policy, determining the retention ordering requires a traversal of the
- * entries.
- *
- * @return a descending snapshot view of this map
- */
- public Map<K, V> descendingMap() {
- return descendingMapWithLimit(Integer.MAX_VALUE);
- }
+ /** An adapter to safely externalize the entry iterator. */
+ final class EntryIterator implements Iterator<Entry<K, V>> {
+ final Iterator<Node<K, V>> iterator = data.values().iterator();
+ Node<K, V> current;
- /**
- * Returns an unmodifiable snapshot {@link Map} view of the mappings contained
- * in this map. The map's collections return the mappings whose order of
- * iteration is the descending order in which its entries are considered
- * eligible for retention, from the most-likely to be retained to the
- * least-likely.
- * <p>
- * Beware that obtaining the mappings is <em>NOT</em> a constant-time
- * operation. Because of the asynchronous nature of the page replacement
- * policy, determining the retention ordering requires a traversal of the
- * entries.
- *
- * @param limit the maximum size of the returned map
- * @return a descending snapshot view of this map
- * @throws IllegalArgumentException if the limit is negative
- */
- public Map<K, V> descendingMapWithLimit(int limit) {
- return orderedMap(false, limit);
- }
-
- Map<K, V> orderedMap(boolean ascending, int limit) {
- checkArgument(limit >= 0);
- evictionLock.lock();
- try {
- drainBuffers();
-
- final int initialCapacity = (weigher == Weighers.entrySingleton())
- ? Math.min(limit, (int) weightedSize())
- : 16;
- final Map<K, V> map = new LinkedHashMap<K, V>(initialCapacity);
- final Iterator<Node<K, V>> iterator = ascending
- ? evictionDeque.iterator()
- : evictionDeque.descendingIterator();
- while (iterator.hasNext() && (limit > map.size())) {
- Node<K, V> node = iterator.next();
- map.put(node.key, node.getValue());
- }
- return unmodifiableMap(map);
- } finally {
- evictionLock.unlock();
- }
+ @Override
+ public boolean hasNext() {
+ return iterator.hasNext();
}
- /**
- * The draining status of the buffers.
- */
- enum DrainStatus {
-
- /**
- * A drain is not taking place.
- */
- IDLE {
- @Override
- boolean shouldDrainBuffers(boolean delayable) {
- return !delayable;
- }
- },
-
- /**
- * A drain is required due to a pending write modification.
- */
- REQUIRED {
- @Override
- boolean shouldDrainBuffers(boolean delayable) {
- return true;
- }
- },
-
- /**
- * A drain is in progress.
- */
- PROCESSING {
- @Override
- boolean shouldDrainBuffers(boolean delayable) {
- return false;
- }
- };
-
- /**
- * Determines whether the buffers should be drained.
- *
- * @param delayable whether a drain should be delayed until required
- * @return whether a drain should be attempted
- */
- abstract boolean shouldDrainBuffers(boolean delayable);
+ @Override
+ public Entry<K, V> next() {
+ current = iterator.next();
+ return new WriteThroughEntry(current);
}
- /**
- * A value, its weight, and the entry's status.
- */
-// @Immutable
- static final class WeightedValue<V> {
- final int weight;
- final V value;
-
- WeightedValue(V value, int weight) {
- this.weight = weight;
- this.value = value;
- }
-
- boolean contains(Object o) {
- return (o == value) || value.equals(o);
- }
-
- /**
- * If the entry is available in the hash-table and page replacement policy.
- */
- boolean isAlive() {
- return weight > 0;
- }
-
- /**
- * If the entry was removed from the hash-table and is awaiting removal from
- * the page replacement policy.
- */
- boolean isRetired() {
- return weight < 0;
- }
-
- /**
- * If the entry was removed from the hash-table and the page replacement
- * policy.
- */
- boolean isDead() {
- return weight == 0;
- }
+ @Override
+ public void remove() {
+ checkState(current != null);
+ ConcurrentLinkedHashMap.this.remove(current.key);
+ current = null;
}
+ }
- /**
- * A node contains the key, the weighted value, and the linkage pointers on
- * the page-replacement algorithm's data structures.
- */
- @SuppressWarnings("serial")
- static final class Node<K, V> extends AtomicReference<WeightedValue<V>>
- implements Linked<Node<K, V>> {
- final K key;
- // @GuardedBy("evictionLock")
- Node<K, V> prev;
- // @GuardedBy("evictionLock")
- Node<K, V> next;
-
- /**
- * Creates a new, unlinked node.
- */
- Node(K key, WeightedValue<V> weightedValue) {
- super(weightedValue);
- this.key = key;
- }
-
- @Override
- // @GuardedBy("evictionLock")
- public Node<K, V> getPrevious() {
- return prev;
- }
-
- @Override
- // @GuardedBy("evictionLock")
- public void setPrevious(Node<K, V> prev) {
- this.prev = prev;
- }
-
- @Override
- // @GuardedBy("evictionLock")
- public Node<K, V> getNext() {
- return next;
- }
-
- @Override
- // @GuardedBy("evictionLock")
- public void setNext(Node<K, V> next) {
- this.next = next;
- }
+ /** An entry that allows updates to write through to the map. */
+ final class WriteThroughEntry extends SimpleEntry<K, V> {
+ static final long serialVersionUID = 1;
- /**
- * Retrieves the value held by the current <tt>WeightedValue</tt>.
- */
- V getValue() {
- return get().value;
- }
+ WriteThroughEntry(Node<K, V> node) {
+ super(node.key, node.getValue());
}
- /**
- * An adapter to safely externalize the keys.
- */
- final class KeySet extends AbstractSet<K> {
- final ConcurrentLinkedHashMap<K, V> map = ConcurrentLinkedHashMap.this;
-
- @Override
- public int size() {
- return map.size();
- }
-
- @Override
- public void clear() {
- map.clear();
- }
-
- @Override
- public Iterator<K> iterator() {
- return new KeyIterator();
- }
-
- @Override
- public boolean contains(Object obj) {
- return containsKey(obj);
- }
-
- @Override
- public boolean remove(Object obj) {
- return (map.remove(obj) != null);
- }
-
- @Override
- public Object[] toArray() {
- return map.data.keySet().toArray();
- }
+ @Override
+ public V setValue(V value) {
+ put(getKey(), value);
+ return super.setValue(value);
+ }
- @Override
- public <T> T[] toArray(T[] array) {
- return map.data.keySet().toArray(array);
- }
+ Object writeReplace() {
+ return new SimpleEntry<K, V>(this);
}
+ }
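
To show the write-through behaviour above: setValue on an entry handed out by the
iterator updates the detached entry and also puts the new value back into the
map. The values are hypothetical.

    import java.util.Map;
    import org.apache.groovy.util.concurrentlinkedhashmap.ConcurrentLinkedHashMap;
    import org.apache.groovy.util.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder;

    class WriteThroughDemo {
        public static void main(String[] args) {
            ConcurrentLinkedHashMap<String, Integer> cache =
                new Builder<String, Integer>().maximumWeightedCapacity(10).build();
            cache.put("a", 1);
            for (Map.Entry<String, Integer> e : cache.entrySet()) {
                e.setValue(e.getValue() + 1); // also performs put("a", 2) on the map
            }
            System.out.println(cache.get("a")); // 2
        }
    }
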
- /**
- * An adapter to safely externalize the key iterator.
- */
- final class KeyIterator implements Iterator<K> {
- final Iterator<K> iterator = data.keySet().iterator();
- K current;
+ /** A weigher that enforces that the weight falls within a valid range. */
+ static final class BoundedEntryWeigher<K, V> implements EntryWeigher<K, V>, Serializable {
+ static final long serialVersionUID = 1;
+ final EntryWeigher<? super K, ? super V> weigher;
- @Override
- public boolean hasNext() {
- return iterator.hasNext();
- }
+ BoundedEntryWeigher(EntryWeigher<? super K, ? super V> weigher) {
+ checkNotNull(weigher);
+ this.weigher = weigher;
+ }
- @Override
- public K next() {
- current = iterator.next();
- return current;
- }
+ @Override
+ public int weightOf(K key, V value) {
+ int weight = weigher.weightOf(key, value);
+ checkArgument(weight >= 1);
+ return weight;
+ }
- @Override
- public void remove() {
- checkState(current != null);
- ConcurrentLinkedHashMap.this.remove(current);
- current = null;
- }
+ Object writeReplace() {
+ return weigher;
}
+ }
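
As an example of what this wrapper guards against, a hedged sketch of a
user-supplied weigher: the builder wraps it in BoundedEntryWeigher, so any
weightOf result below 1 fails fast with IllegalArgumentException. The byte-size
weigher here is hypothetical.

    import org.apache.groovy.util.concurrentlinkedhashmap.ConcurrentLinkedHashMap;
    import org.apache.groovy.util.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder;
    import org.apache.groovy.util.concurrentlinkedhashmap.EntryWeigher;

    class WeigherDemo {
        public static void main(String[] args) {
            EntryWeigher<String, byte[]> byBytes = new EntryWeigher<String, byte[]>() {
                @Override
                public int weightOf(String key, byte[] value) {
                    return Math.max(1, value.length); // keep >= 1 or the wrapper throws
                }
            };
            ConcurrentLinkedHashMap<String, byte[]> cache =
                new Builder<String, byte[]>()
                    .maximumWeightedCapacity(1 << 20) // bound by ~1 MiB of values
                    .weigher(byBytes)
                    .build();
        }
    }
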
- /**
- * An adapter to safely externalize the values.
- */
- final class Values extends AbstractCollection<V> {
+ /** A queue that discards all additions and is always empty. */
+ static final class DiscardingQueue extends AbstractQueue<Object> {
+ @Override public boolean add(Object e) { return true; }
+ @Override public boolean offer(Object e) { return true; }
+ @Override public Object poll() { return null; }
+ @Override public Object peek() { return null; }
+ @Override public int size() { return 0; }
+ @Override public Iterator<Object> iterator() { return emptyList().iterator(); }
+ }
- @Override
- public int size() {
- return ConcurrentLinkedHashMap.this.size();
- }
+ /** A listener that ignores all notifications. */
+ enum DiscardingListener implements EvictionListener<Object, Object> {
+ INSTANCE;
- @Override
- public void clear() {
- ConcurrentLinkedHashMap.this.clear();
- }
+ @Override public void onEviction(Object key, Object value) {}
+ }
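
DiscardingListener is only the default; to observe evictions, a listener can be
registered through the builder. A hedged sketch (the key and value types and the
tiny bound are hypothetical):

    import org.apache.groovy.util.concurrentlinkedhashmap.ConcurrentLinkedHashMap;
    import org.apache.groovy.util.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder;
    import org.apache.groovy.util.concurrentlinkedhashmap.EvictionListener;

    class ListenerDemo {
        public static void main(String[] args) {
            EvictionListener<String, Integer> logger = new EvictionListener<String, Integer>() {
                @Override
                public void onEviction(String key, Integer value) {
                    System.out.println("evicted " + key + "=" + value);
                }
            };
            ConcurrentLinkedHashMap<String, Integer> cache =
                new Builder<String, Integer>()
                    .maximumWeightedCapacity(2) // tiny bound to force eviction
                    .listener(logger)
                    .build();
            cache.put("a", 1);
            cache.put("b", 2);
            cache.put("c", 3); // exceeds the bound; the coldest entry is evicted
        }
    }
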
- @Override
- public Iterator<V> iterator() {
- return new ValueIterator();
- }
+ /* ---------------- Serialization Support -------------- */
- @Override
- public boolean contains(Object o) {
- return containsValue(o);
- }
- }
+ static final long serialVersionUID = 1;
- /**
- * An adapter to safely externalize the value iterator.
- */
- final class ValueIterator implements Iterator<V> {
- final Iterator<Node<K, V>> iterator = data.values().iterator();
- Node<K, V> current;
+ Object writeReplace() {
+ return new SerializationProxy<K, V>(this);
+ }
- @Override
- public boolean hasNext() {
- return iterator.hasNext();
- }
+ private void readObject(ObjectInputStream stream) throws InvalidObjectException {
+ throw new InvalidObjectException("Proxy required");
+ }
- @Override
- public V next() {
- current = iterator.next();
- return current.getValue();
- }
+ /**
+ * A proxy that is serialized instead of the map. The page-replacement
+ * algorithm's data structures are not serialized, so the deserialized
+ * instance contains only the entries. This is acceptable because a cache
+ * holds transient, recomputable data, and serialization is typically used
+ * only as a fast warm-up process.
+ */
+ static final class SerializationProxy<K, V> implements Serializable {
+ final EntryWeigher<? super K, ? super V> weigher;
+ final EvictionListener<K, V> listener;
+ final int concurrencyLevel;
+ final Map<K, V> data;
+ final long capacity;
- @Override
- public void remove() {
- checkState(current != null);
- ConcurrentLinkedHashMap.this.remove(current.key);
- current = null;
- }
+ SerializationProxy(ConcurrentLinkedHashMap<K, V> map) {
+ concurrencyLevel = map.concurrencyLevel;
+ data = new HashMap<K, V>(map);
+ capacity = map.capacity.get();
+ listener = map.listener;
+ weigher = map.weigher;
}
- /**
- * An adapter to safely externalize the entries.
- */
- final class EntrySet extends AbstractSet<Entry<K, V>> {
- final ConcurrentLinkedHashMap<K, V> map = ConcurrentLinkedHashMap.this;
+ Object readResolve() {
+ ConcurrentLinkedHashMap<K, V> map = new Builder<K, V>()
+ .concurrencyLevel(concurrencyLevel)
+ .maximumWeightedCapacity(capacity)
+ .listener(listener)
+ .weigher(weigher)
+ .build();
+ map.putAll(data);
+ return map;
+ }
- @Override
- public int size() {
- return map.size();
- }
+ static final long serialVersionUID = 1;
+ }
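
A hedged round-trip sketch of the proxy above: entries and configuration survive
serialization, while the recency ordering is rebuilt from scratch by putAll in
readResolve. The helper method is hypothetical.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import org.apache.groovy.util.concurrentlinkedhashmap.ConcurrentLinkedHashMap;

    class RoundTrip {
        @SuppressWarnings("unchecked")
        static <K, V> ConcurrentLinkedHashMap<K, V> copy(ConcurrentLinkedHashMap<K, V> cache)
                throws IOException, ClassNotFoundException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(bytes);
            out.writeObject(cache); // writeReplace() substitutes the proxy
            out.close();
            ObjectInputStream in = new ObjectInputStream(
                new ByteArrayInputStream(bytes.toByteArray()));
            // Same entries, capacity, weigher, and listener; fresh eviction state.
            return (ConcurrentLinkedHashMap<K, V>) in.readObject();
        }
    }
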
- @Override
- public void clear() {
- map.clear();
- }
+ /* ---------------- Builder -------------- */
- @Override
- public Iterator<Entry<K, V>> iterator() {
- return new EntryIterator();
- }
+ /**
+ * A builder that creates {@link ConcurrentLinkedHashMap} instances. It
+ * provides a flexible approach for constructing customized instances with
+ * a named parameter syntax. It can be used in the following manner:
+ * <pre>{@code
+ * ConcurrentMap<Vertex, Set<Edge>> graph = new Builder<Vertex, Set<Edge>>()
+ * .maximumWeightedCapacity(5000)
+ * .weigher(Weighers.<Edge>set())
+ * .build();
+ * }</pre>
+ */
+ public static final class Builder<K, V> {
+ static final int DEFAULT_CONCURRENCY_LEVEL = 16;
+ static final int DEFAULT_INITIAL_CAPACITY = 16;
- @Override
- public boolean contains(Object obj) {
- if (!(obj instanceof Entry<?, ?>)) {
- return false;
- }
- Entry<?, ?> entry = (Entry<?, ?>) obj;
- Node<K, V> node = map.data.get(entry.getKey());
- return (node != null) && (node.getValue().equals(entry.getValue()));
- }
+ EvictionListener<K, V> listener;
+ EntryWeigher<? super K, ? super V> weigher;
- @Override
- public boolean add(Entry<K, V> entry) {
- return (map.putIfAbsent(entry.getKey(), entry.getValue()) == null);
- }
+ int concurrencyLevel;
+ int initialCapacity;
+ long capacity;
- @Override
- public boolean remove(Object obj) {
- if (!(obj instanceof Entry<?, ?>)) {
- return false;
- }
- Entry<?, ?> entry = (Entry<?, ?>) obj;
- return map.remove(entry.getKey(), entry.getValue());
- }
+ @SuppressWarnings("unchecked")
+ public Builder() {
+ capacity = -1;
+ weigher = Weighers.entrySingleton();
+ initialCapacity = DEFAULT_INITIAL_CAPACITY;
+ concurrencyLevel = DEFAULT_CONCURRENCY_LEVEL;
+ listener = (EvictionListener<K, V>) DiscardingListener.INSTANCE;
}
/**
- * An adapter to safely externalize the entry iterator.
+ * Specifies the initial capacity of the hash table (default <tt>16</tt>).
+ * This is the number of key-value pairs that the hash table can hold
+ * before a resize operation is required.
+ *
+ * @param initialCapacity the initial capacity used to size the hash table
+ * to accommodate this many entries.
+ * @throws IllegalArgumentException if the initialCapacity is negative
*/
- final class EntryIterator implements Iterator<Entry<K, V>> {
- final Iterator<Node<K, V>> iterator = data.values().iterator();
- Node<K, V> current;
-
- @Override
- public boolean hasNext() {
- return iterator.hasNext();
- }
-
- @Override
- public Entry<K, V> next() {
- current = iterator.next();
- return new WriteThroughEntry(current);
- }
-
- @Override
- public void remove() {
- checkState(current != null);
- ConcurrentLinkedHashMap.this.remove(current.key);
- current = null;
- }
+ public Builder<K, V> initialCapacity(int initialCapacity) {
+ checkArgument(initialCapacity >= 0);
+ this.initialCapacity = initialCapacity;
+ return this;
}
/**
- * An entry that allows updates to write through to the map.
+ * Specifies the maximum weighted capacity that the map is coerced to; the
+ * map may exceed this bound temporarily.
+ *
+ * @param capacity the weighted threshold to bound the map by
+ * @throws IllegalArgumentException if the maximumWeightedCapacity is
+ * negative
*/
- final class WriteThroughEntry extends SimpleEntry<K, V> {
- static final long serialVersionUID = 1;
-
- WriteThroughEntry(Node<K, V> node) {
- super(node.key, node.getValue());
- }
-
- @Override
- public V setValue(V value) {
- put(getKey(), value);
- return super.setValue(value);
- }
-
- Object writeReplace() {
- return new SimpleEntry<K, V>(this);
- }
+ public Builder<K, V> maximumWeightedCapacity(long capacity) {
+ checkArgument(capacity >= 0);
+ this.capacity = capacity;
+ return this;
}
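
Putting the two settings together, a hedged usage sketch in the style of the
builder example above: initialCapacity pre-sizes the hash table, while
maximumWeightedCapacity is the target the map is coerced back to, and writes can
briefly overshoot it because eviction is applied asynchronously.

    ConcurrentLinkedHashMap<Long, String> cache =
        new ConcurrentLinkedHashMap.Builder<Long, String>()
            .initialCapacity(256)          // pre-size the hash table
            .maximumWeightedCapacity(1000) // eviction target, not a hard ceiling
            .build();
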
/**
- * A weigher that enforces that the weight falls within a valid range.
+ * Specifies the estimated number of concurrently updating threads. The
+ * implementation performs internal sizing to try to accommodate this many
+ * threads (default <tt>16</tt>).
+ *
+ * @p
<TRUNCATED>