Posted to commits@subversion.apache.org by st...@apache.org on 2014/02/13 23:05:27 UTC

svn commit: r1568062 - /subversion/trunk/subversion/libsvn_subr/cache-membuffer.c

Author: stefan2
Date: Thu Feb 13 22:05:26 2014
New Revision: 1568062

URL: http://svn.apache.org/r1568062
Log:
Membuffer cache hit counter update done right:  introduce a separate
per-segment mutex for counter updates.  Since the critical section
is very tight, there is little actual lock contention and the sync.
overhead is virtually non-existent (tested with 100 concurrent threads
on one and two-socket systems).

* subversion/libsvn_subr/cache-membuffer.c
  (): Remove the comments about racy hit counter handling.
  (svn_membuffer_t): Mention that our profiling counters are not
                     thread-safe and that we don't care.
  (drop_entry,
   let_entry_age): Since hit counter updates are now synchronized,
                   there can't be any underflows in the global counters
                   anymore.
  (svn_cache__membuffer_cache_create): Initialize the new mutex.
  (increment_hit_counters): Formally return an svn_error_t * to make
                            this function usable with our mutex macro.
  (membuffer_cache_get_internal,
   membuffer_cache_has_key_internal,
   membuffer_cache_get_partial_internal,
   membuffer_cache_set_partial_internal): Synchronize hit counter updates.
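
[The mutex macro referred to above is SVN_MUTEX__WITH_LOCK(mutex, expr): it
acquires the mutex, evaluates the expression, releases the mutex again, and
propagates any svn_error_t * the expression returns.  That is why
increment_hit_counters() must now formally return one.  A minimal sketch of
the calling pattern follows; the bump_counter() helper is hypothetical and
not part of this commit:

  /* Hypothetical helper: increments *COUNTER while the caller holds the
   * counter mutex via SVN_MUTEX__WITH_LOCK.  The error return exists only
   * to satisfy the macro's interface. */
  static svn_error_t *
  bump_counter(apr_uint64_t *counter)
  {
    ++*counter;
    return SVN_NO_ERROR;
  }

  /* Illustrative call site, inside a function that itself returns
   * svn_error_t *: */
  SVN_MUTEX__WITH_LOCK(cache->counter_mutex,
                       bump_counter(&cache->total_hits));
]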

Modified:
    subversion/trunk/subversion/libsvn_subr/cache-membuffer.c

Modified: subversion/trunk/subversion/libsvn_subr/cache-membuffer.c
URL: http://svn.apache.org/viewvc/subversion/trunk/subversion/libsvn_subr/cache-membuffer.c?rev=1568062&r1=1568061&r2=1568062&view=diff
==============================================================================
--- subversion/trunk/subversion/libsvn_subr/cache-membuffer.c (original)
+++ subversion/trunk/subversion/libsvn_subr/cache-membuffer.c Thu Feb 13 22:05:26 2014
@@ -98,12 +98,6 @@
  * with new entries. For details on the fine-tuning involved, see the
  * comments in ensure_data_insertable_l2().
  *
- * Reads will update hit counts without synchronization, i.e. some updates
- * will be lost and mostly so on the segment-global counters because read
- * conflicts are more common on them.  I.e. sum(entry.hits) > cache->hits.
- * Thus, we must check for underflows when removing or aging an entry and
- * update the segment-global hit counter.
- * 
  * Due to the randomized mapping of keys to entry groups, some groups may
  * overflow.  In that case, there are spare groups that can be chained to
  * an already used group to extend it.
@@ -556,17 +550,23 @@ struct svn_membuffer_t
 
 
   /* Total number of calls to membuffer_cache_get.
-   * Purely statistical information that may be used for profiling.
+   * Purely statistical information that may be used for profiling only.
+   * Updates are not synchronized and values may be nonsensical on some
+   * platforms.
    */
   apr_uint64_t total_reads;
 
   /* Total number of calls to membuffer_cache_set.
-   * Purely statistical information that may be used for profiling.
+   * Purely statistical information that may be used for profiling only.
+   * Updates are not synchronized and values may be nonsensical on some
+   * platforms.
    */
   apr_uint64_t total_writes;
 
   /* Total number of hits since the cache's creation.
-   * Purely statistical information that may be used for profiling.
+   * Purely statistical information that may be used for profiling only.
+   * Updates are not synchronized and values may be nonsensical on some
+   * platforms.
    */
   apr_uint64_t total_hits;
 
@@ -583,6 +583,12 @@ struct svn_membuffer_t
    */
   svn_boolean_t allow_blocking_writes;
 #endif
+
+  /* All counters that have consistency requirements on them (currently,
+   * that's only the hit counters) must use this mutex to serialize their
+   * updates.
+   */
+  svn_mutex__t *counter_mutex;
 };
 
 /* Align integer VALUE to the next ITEM_ALIGNMENT boundary.
@@ -968,10 +974,7 @@ drop_entry(svn_membuffer_t *cache, entry
   /* update global cache usage counters
    */
   cache->used_entries--;
-  if (cache->hit_count > entry->hit_count)
-    cache->hit_count -= entry->hit_count;
-  else
-    cache->hit_count = 0;
+  cache->hit_count -= entry->hit_count;
 
   /* extend the insertion window, if the entry happens to border it
    */
@@ -1104,11 +1107,7 @@ let_entry_age(svn_membuffer_t *cache, en
 {
   apr_uint32_t hits_removed = (entry->hit_count + 1) >> 1;
 
-  if (cache->hit_count > hits_removed)
-    cache->hit_count -= hits_removed;
-  else
-    cache->hit_count = 0;
-
+  cache->hit_count -= hits_removed;
   entry->hit_count -= hits_removed;
 }
 
@@ -1786,6 +1785,8 @@ svn_cache__membuffer_cache_create(svn_me
        */
       c[seg].allow_blocking_writes = allow_blocking_writes;
 #endif
+
+      SVN_ERR(svn_mutex__init(&c[seg].counter_mutex, thread_safe, pool));
     }
 
   /* done here
@@ -2012,7 +2013,7 @@ membuffer_cache_set(svn_membuffer_t *cac
 
 /* Count a hit in ENTRY within CACHE.
  */
-static void
+static svn_error_t *
 increment_hit_counters(svn_membuffer_t *cache, entry_t *entry)
 {
   /* To minimize the memory footprint of the cache index, we limit local
@@ -2026,6 +2027,8 @@ increment_hit_counters(svn_membuffer_t *
 
   /* That one is for stats only. */
   cache->total_hits++;
+
+  return SVN_NO_ERROR;
 }
 
 /* Look for the cache entry in group GROUP_INDEX of CACHE, identified
@@ -2084,7 +2087,8 @@ membuffer_cache_get_internal(svn_membuff
 
   /* update hit statistics
    */
-  increment_hit_counters(cache, entry);
+  SVN_MUTEX__WITH_LOCK(cache->counter_mutex,
+                       increment_hit_counters(cache, entry));
   *item_size = entry->size;
 
   return SVN_NO_ERROR;
@@ -2148,7 +2152,8 @@ membuffer_cache_has_key_internal(svn_mem
          again.  While items in L1 are well protected for a while, L2
          items may get evicted soon.  Thus, mark all them as "hit" to give
          them a higher chance of survival. */
-      increment_hit_counters(cache, entry);
+      SVN_MUTEX__WITH_LOCK(cache->counter_mutex,
+                           increment_hit_counters(cache, entry));
 
       *found = TRUE;
     }
@@ -2219,7 +2224,8 @@ membuffer_cache_get_partial_internal(svn
     {
       *found = TRUE;
 
-      increment_hit_counters(cache, entry);
+      SVN_MUTEX__WITH_LOCK(cache->counter_mutex,
+                           increment_hit_counters(cache, entry));
 
 #ifdef SVN_DEBUG_CACHE_MEMBUFFER
 
@@ -2309,7 +2315,8 @@ membuffer_cache_set_partial_internal(svn
       char *orig_data = data;
       apr_size_t size = entry->size;
 
-      increment_hit_counters(cache, entry);
+      SVN_MUTEX__WITH_LOCK(cache->counter_mutex,
+                           increment_hit_counters(cache, entry));
       cache->total_writes++;
 
 #ifdef SVN_DEBUG_CACHE_MEMBUFFER



RE: svn commit: r1568062 - /subversion/trunk/subversion/libsvn_subr/cache-membuffer.c

Posted by Bert Huijben <be...@qqmail.nl>.

> -----Original Message-----
> From: stefan2@apache.org [mailto:stefan2@apache.org]
> Sent: donderdag 13 februari 2014 23:05
> To: commits@subversion.apache.org
> Subject: svn commit: r1568062 -
> /subversion/trunk/subversion/libsvn_subr/cache-membuffer.c
> 
> Author: stefan2
> Date: Thu Feb 13 22:05:26 2014
> New Revision: 1568062
> 
> URL: http://svn.apache.org/r1568062
> Log:
> Membuffer cache hit counter update done right:  introduce a separate
> per-segment mutex for counter updates.  Since the critical section
> is very tight, there is little actual lock contention and the sync.
> overhead is virtually non-existent (tested with 100 concurrent threads
> on one and two-socket systems).

Why do you create a mutex when a simple atomic increment, as suggested earlier in this thread, would work just fine?

An atomic increment is implemented as a single operation on most CPUs, while the callback infrastructure you now use for the call can't be inlined. It even pulls in a few DLLs in our usual shared-library setup and, in some cases, kernel operations. The number of system mutexes may also be limited in some implementations.

I think you could replace most of this patch with a few calls to apr_atomic_inc32().
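
For illustration, a minimal sketch of that lock-free variant (the types and
names are made up for the example and do not match the real svn_membuffer_t
or entry_t layout; in particular the segment-global totals are apr_uint64_t,
so they would need a 64-bit atomic or a narrower representation):

  #include <apr_atomic.h>

  /* Illustrative counters only -- not the actual cache structures. */
  typedef struct hit_stats_t
  {
    volatile apr_uint32_t entry_hits;    /* per-entry hit count */
    volatile apr_uint32_t segment_hits;  /* segment-global hit count */
  } hit_stats_t;

  static void
  count_hit(hit_stats_t *stats)
  {
    /* Each update is a single atomic instruction: no mutex, no callback. */
    apr_atomic_inc32(&stats->entry_hits);
    apr_atomic_inc32(&stats->segment_hits);
  }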

	Bert