Posted to commits@activemq.apache.org by ta...@apache.org on 2012/08/01 22:07:55 UTC

svn commit: r1368228 - /activemq/activemq-cpp/trunk/activemq-cpp/src/main/decaf/util/concurrent/locks/AbstractQueuedSynchronizer.cpp

Author: tabish
Date: Wed Aug  1 20:07:54 2012
New Revision: 1368228

URL: http://svn.apache.org/viewvc?rev=1368228&view=rev
Log:
Minor code cleanup and docs added

Modified:
    activemq/activemq-cpp/trunk/activemq-cpp/src/main/decaf/util/concurrent/locks/AbstractQueuedSynchronizer.cpp

Modified: activemq/activemq-cpp/trunk/activemq-cpp/src/main/decaf/util/concurrent/locks/AbstractQueuedSynchronizer.cpp
URL: http://svn.apache.org/viewvc/activemq/activemq-cpp/trunk/activemq-cpp/src/main/decaf/util/concurrent/locks/AbstractQueuedSynchronizer.cpp?rev=1368228&r1=1368227&r2=1368228&view=diff
==============================================================================
--- activemq/activemq-cpp/trunk/activemq-cpp/src/main/decaf/util/concurrent/locks/AbstractQueuedSynchronizer.cpp (original)
+++ activemq/activemq-cpp/trunk/activemq-cpp/src/main/decaf/util/concurrent/locks/AbstractQueuedSynchronizer.cpp Wed Aug  1 20:07:54 2012
@@ -41,6 +41,71 @@ using namespace decaf::internal::util::c
 ////////////////////////////////////////////////////////////////////////////////
 namespace {
 
+    /**
+     * Wait queue node class.
+     *
+     * The wait queue is a variant of a "CLH" (Craig, Landin, and Hagersten)
+     * lock queue. CLH locks are normally used for spinlocks.  We instead use
+     * them for blocking synchronizers, but use the same basic tactic of holding
+     * some of the control information about a thread in the predecessor of its
+     * node.  A "status" field in each node keeps track of whether a thread
+     * should block.  A node is signalled when its predecessor releases.  Each
+     * node of the queue otherwise serves as a specific-notification-style monitor
+     * holding a single waiting thread. The status field does NOT control whether
+     * threads are granted locks etc though.  A thread may try to acquire if it is
+     * first in the queue. But being first does not guarantee success; it only gives
+     * the right to contend.  So the currently released contender thread may need
+     * to rewait.
+     *
+     * To enqueue into a CLH lock, you atomically splice a node in as the new
+     * tail.  To dequeue, you just set the head field.
+     * <pre>
+     *      +------+  prev +-----+       +-----+
+     * head |      | <---- |     | <---- |     |  tail
+     *      +------+       +-----+       +-----+
+     * </pre>
+     *
+     * Insertion into a CLH queue requires only a single atomic operation on "tail", so
+     * there is a simple atomic point of demarcation from unqueued to queued. Similarly,
+     * dequeuing involves only updating the "head". However, it takes a bit more work for
+     * nodes to determine who their successors are, in part to deal with possible
+     * cancellation due to timeouts and interrupts.
+     *
+     * The "prev" links (not used in original CLH locks), are mainly needed to handle
+     * cancellation. If a node is cancelled, its successor is (normally) relinked to a
+     * non-cancelled predecessor. For explanation of similar mechanics in the case of
+     * spin locks, see the papers by Scott and Scherer at:
+     *
+     *     http://www.cs.rochester.edu/u/scott/synchronization/
+     *
+     * We also use "next" links to implement blocking mechanics. The thread id for
+     * each node is kept in its own node, so a predecessor signals the next node to
+     * wake up by traversing the next link to determine which thread it is.  Determination
+     * of successor must avoid races with newly queued nodes to set the "next" fields
+     * of their predecessors.  This is solved when necessary by checking backwards
+     * from the atomically updated "tail" when a node's successor appears to be null.
+     * (Or, said differently, the next-links are an optimization so that we don't
+     * usually need a backward scan.)
+     *
+     * Cancellation introduces some conservatism to the basic algorithms.  Since we
+     * must poll for cancellation of other nodes, we can miss noticing whether a
+     * cancelled node is ahead or behind us. This is dealt with by always unparking
+     * successors upon cancellation, allowing them to stabilize on a new predecessor,
+     * unless we can identify an uncancelled predecessor who will carry this
+     * responsibility.
+     *
+     * CLH queues need a dummy header node to get started. But we don't create one on
+     * construction, because it would be wasted effort if there is never contention.
+     * Instead, the node is constructed and head and tail pointers are set upon first
+     * contention.
+     *
+     * Threads waiting on Conditions use the same nodes, but use an additional link.
+     * Conditions only need to link nodes in simple (non-concurrent) linked queues
+     * because they are only accessed when exclusively held.  Upon await, a node is
+     * inserted into a condition queue.  Upon signal, the node is transferred to the
+     * main queue.  A special value of the status field is used to mark which queue a node
+     * is on.
+     */
     class Node {
     public:
 
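The CLH mechanics described in the comment above boil down to two very small
operations.  A minimal standalone sketch, illustrative only and not part of
this file (it uses std::atomic and hypothetical names rather than Decaf's
AtomicReference):

    #include <atomic>

    struct ClhNode {
        ClhNode* prev;        // predecessor link, kept for cancellation handling
        int waitStatus;       // tells the owning thread whether it should block
        ClhNode() : prev(nullptr), waitStatus(0) {}
    };

    std::atomic<ClhNode*> clhHead(nullptr);
    std::atomic<ClhNode*> clhTail(nullptr);

    // Enqueue: atomically splice the node in as the new tail; the CAS on the
    // tail is the single atomic point of demarcation from unqueued to queued.
    void clhEnqueue(ClhNode* node) {
        for (;;) {
            ClhNode* t = clhTail.load();
            node->prev = t;
            if (clhTail.compare_exchange_strong(t, node)) {
                return;
            }
        }
    }

    // Dequeue: the thread that acquired simply publishes its node as the head.
    void clhDequeue(ClhNode* acquiredNode) {
        clhHead.store(acquiredNode);
    }
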
@@ -87,39 +152,38 @@ namespace {
         /**
          * Link to predecessor node that current node/thread relies on
          * for checking waitStatus. Assigned during enqueuing, and nulled
-         * out.  Also, upon cancellation of a predecessor, we short-circuit
-         * while finding a non-canceled one, which will always exist because
-         * the head node is never canceled: A node becomes head only as a
-         * result of successful acquire. A canceled thread never succeeds
-         * in acquiring, and a thread only cancels itself, not any other node.
+         * out only upon dequeuing.  Also, upon cancellation of a predecessor,
+         * we short-circuit while finding a non-canceled one, which will always
+         * exist because the head node is never canceled: A node becomes head
+         * only as a result of successful acquire. A canceled thread never
+         * succeeds in acquiring, and a thread only cancels itself, not any
+         * other node.
          */
         Node* prev;
 
         /**
-         * Link to the successor node that the current node/thread
-         * unparks upon release. Assigned during enqueuing, adjusted
-         * when bypassing canceled predecessors, and nulled out when
-         * dequeued.  The enq operation does not assign next field of
-         * a predecessor until after attachment, so seeing a NULL next
-         * field does not necessarily mean that node is at end of queue.
-         * However, if a next field appears to be NULL, we can scan
-         * prev's from the tail to double-check.
+         * Link to the successor node that the current node/thread unparks upon
+         * release. Assigned during enqueuing, adjusted when bypassing canceled
+         * predecessors, and nulled out when dequeued.  The enq operation does not
+         * assign next field of a predecessor until after attachment, so seeing a
+         * NULL next field does not necessarily mean that node is at end of queue.
+         * However, if a next field appears to be NULL, we can scan prev's from the
+         * tail to double-check.
          */
         Node* next;
 
         /**
-         * The thread that created this Node as is waiting to acquire the
-         * lock.
+         * The thread that created this Node and is waiting to acquire the lock.
          */
         Thread* thread;
 
         /**
-         * Link to next node waiting on condition, or the special value SHARED.
-         * Because condition queues are accessed only when holding in exclusive
-         * mode, we just need a simple linked queue to hold nodes while they are
-         * waiting on conditions. They are then transferred to the queue to
-         * re-acquire. And because conditions can only be exclusive, we save a
-         * field by using special value to indicate shared mode.
+         * Link to next node waiting on condition, or the special value SHARED. Because
+         * condition queues are accessed only when holding in exclusive mode, we just
+         * need a simple linked queue to hold nodes while they are waiting on conditions.
+         * They are then transferred to the queue to re-acquire. And because conditions
+         * can only be exclusive, we save a field by using a special value to indicate
+         * shared mode.
          */
         Node* nextWaiter;
 
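The nextWaiter field above is what makes the simple condition queue work:
await() appends to it while the exclusive lock is held, so no atomics are
needed.  A rough sketch of that append step (hypothetical helper; the real
waiter management lives in the Condition implementation):

    // Called with the exclusive lock held, so plain pointer updates are safe.
    // firstWaiter/lastWaiter are the condition queue's own head and tail.
    void addConditionWaiter(Node*& firstWaiter, Node*& lastWaiter, Node* node) {
        node->waitStatus = Node::CONDITION;   // marks which queue the node is on
        node->nextWaiter = NULL;
        if (lastWaiter == NULL) {
            firstWaiter = node;
        } else {
            lastWaiter->nextWaiter = node;
        }
        lastWaiter = node;
    }

On signal() the node is unlinked from this list and handed to
transferForSignal() (further down) to be spliced onto the main sync queue.
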
@@ -167,6 +231,10 @@ namespace util {
 namespace concurrent {
 namespace locks {
 
+    /**
+     * The real implementation of AQS is hidden here to allow code fixes without
+     * altering the ABI.
+     */
     class SynchronizerState {
     public:
 
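The comment above describes the pimpl idiom: the public
AbstractQueuedSynchronizer keeps only an opaque pointer to SynchronizerState,
so fields and helpers can be added to the implementation later without
changing the public class's size or layout.  A bare-bones sketch of the
pattern (member and constructor details are illustrative, not copied from the
real header):

    // ---- public header: layout frozen for ABI purposes ----
    class SynchronizerState;                      // opaque forward declaration

    class AbstractQueuedSynchronizer {
    private:
        SynchronizerState* impl;                  // only data member clients see
    public:
        AbstractQueuedSynchronizer();
        virtual ~AbstractQueuedSynchronizer();
    };

    // ---- implementation file: free to evolve between releases ----
    class SynchronizerState {
    public:
        AbstractQueuedSynchronizer* parent;
        int state;                                // sketch; the real class holds more
        SynchronizerState(AbstractQueuedSynchronizer* p) : parent(p), state(0) {}
    };

    AbstractQueuedSynchronizer::AbstractQueuedSynchronizer() :
        impl(new SynchronizerState(this)) {}

    AbstractQueuedSynchronizer::~AbstractQueuedSynchronizer() {
        delete impl;
    }
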
@@ -193,23 +261,25 @@ namespace locks {
         decaf_rwmutex_t rwLock;
 
         /**
-         * Head of the wait queue, lazily initialized.  Except for initialization,
-         * it is modified only via method setHead.  Note: If head exists, its
-         * waitStatus is guaranteed not to be CANCELLED.
+         * Head of the wait queue, lazily initialized.  Except for initialization, it is
+         * modified only via method setHead.  Note: If head exists, its waitStatus is
+         * guaranteed not to be CANCELLED.
          */
         AtomicReference<Node> head;
 
         /**
          * Tail of the wait queue, lazily initialized.  Modified only via method
-         * enq to add new wait node.
+         * enqueue() to add a new wait node.
          */
         AtomicReference<Node> tail;
 
     public:
 
-        SynchronizerState(AbstractQueuedSynchronizer* parent) : parent(parent), state(0), rwLock(), head(), tail() {
+        SynchronizerState(AbstractQueuedSynchronizer* parent) :
+            parent(parent), state(0), rwLock(), head(), tail() {
             PlatformThread::createRWMutex(&rwLock);
         }
+
         virtual ~SynchronizerState() {
 
             // Ensure that the destructor waits for other operations to complete.
@@ -237,7 +307,7 @@ namespace locks {
          * @param node
          *      The new node to add.
          */
-        Node* enq(Node* node) {
+        Node* enqueue(Node* node) {
             for (;;) {
                 Node* t = tail.get();
                 if (t == NULL) { // Must initialize
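
The hunk is cut off just after the lazy-initialization check; for orientation,
the rest of the loop in the Java enq() that this method mirrors runs roughly
as below.  This is a hedged reconstruction, not the verbatim Decaf code, and
it assumes AtomicReference exposes Java-style get()/set()/compareAndSet():

    Node* enqueue(Node* node) {
        for (;;) {
            Node* t = tail.get();
            if (t == NULL) {                        // Must initialize
                Node* dummy = new Node();           // lazily created dummy head
                if (head.compareAndSet(NULL, dummy)) {
                    tail.set(dummy);
                } else {
                    delete dummy;                   // lost the init race, retry
                }
            } else {
                node->prev = t;
                if (tail.compareAndSet(t, node)) {  // splice in as the new tail
                    t->next = node;
                    return t;                       // return the predecessor
                }
            }
        }
    }
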
@@ -276,7 +346,7 @@ namespace locks {
                 }
             }
 
-            enq(node);
+            enqueue(node);
             return node;
         }
 
@@ -875,7 +945,7 @@ namespace locks {
              * attempt to set waitStatus fails, wake up to resync (in which
              * case the waitStatus can be transiently and harmlessly wrong).
              */
-            Node* p = enq(node);
+            Node* p = enqueue(node);
             int ws = p->waitStatus;
             if (ws > 0 || !compareAndSetWaitStatus(p, ws, Node::SIGNAL)) {
                 LockSupport::unpark((Thread*)node->thread);
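
For context, that fragment sits inside transferForSignal(); the overall shape
of the Java method being mirrored is roughly as follows (again a hedged
reconstruction rather than the exact Decaf code):

    bool transferForSignal(Node* node) {
        // If the node was already cancelled out of CONDITION state, do nothing.
        if (!compareAndSetWaitStatus(node, Node::CONDITION, 0)) {
            return false;
        }

        // Splice onto the main sync queue, then try to flag the predecessor so
        // it will signal this node on release.  If the predecessor is cancelled
        // (ws > 0) or the CAS fails, wake the waiting thread now so it can
        // resynchronize itself; a transiently wrong waitStatus is harmless.
        Node* p = enqueue(node);
        int ws = p->waitStatus;
        if (ws > 0 || !compareAndSetWaitStatus(p, ws, Node::SIGNAL)) {
            LockSupport::unpark((Thread*)node->thread);
        }
        return true;
    }
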
@@ -900,7 +970,7 @@ namespace locks {
          */
         bool transferAfterCancelledWait(Node* node) {
             if (compareAndSetWaitStatus(node, Node::CONDITION, 0)) {
-                enq(node);
+                enqueue(node);
                 return true;
             }
 
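The remainder of transferAfterCancelledWait() is outside the diff context; in
the Java method it mirrors, the losing side of that CAS simply spins until the
signalling thread finishes the transfer.  Roughly (hedged reconstruction;
isOnSyncQueue() is the Java helper name and may differ here):

    bool transferAfterCancelledWait(Node* node) {
        if (compareAndSetWaitStatus(node, Node::CONDITION, 0)) {
            enqueue(node);
            return true;              // cancelled before any signal arrived
        }

        // A signal beat us to the CAS: it is already moving the node to the
        // sync queue, so just wait for that transfer to complete.
        while (!isOnSyncQueue(node)) {
            Thread::yield();
        }
        return false;
    }
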
@@ -941,7 +1011,7 @@ namespace locks {
                 if (failed) {
                     node->waitStatus = Node::CANCELLED;
                     // Enqueue it even though canceled so that it gets deleted
-                    enq(node);
+                    enqueue(node);
                 }
 
                 throw ex;