You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@subversion.apache.org by st...@apache.org on 2015/11/30 23:15:00 UTC

svn commit: r1717338 - /subversion/trunk/subversion/libsvn_subr/cache-membuffer.c

Author: stefan2
Date: Mon Nov 30 22:15:00 2015
New Revision: 1717338

URL: http://svn.apache.org/viewvc?rev=1717338&view=rev
Log:
Fix insertion of very large items into the membuffer cache on machines
with segmented memory. 

* subversion/libsvn_subr/cache-membuffer.c
  (membuffer_cache_set_internal): With segmented memory, item size + key
                                  length might actually overflow size_t.

Modified:
    subversion/trunk/subversion/libsvn_subr/cache-membuffer.c

Modified: subversion/trunk/subversion/libsvn_subr/cache-membuffer.c
URL: http://svn.apache.org/viewvc/subversion/trunk/subversion/libsvn_subr/cache-membuffer.c?rev=1717338&r1=1717337&r2=1717338&view=diff
==============================================================================
--- subversion/trunk/subversion/libsvn_subr/cache-membuffer.c (original)
+++ subversion/trunk/subversion/libsvn_subr/cache-membuffer.c Mon Nov 30 22:15:00 2015
@@ -2214,14 +2214,29 @@ membuffer_cache_set_internal(svn_membuff
                              apr_pool_t *scratch_pool)
 {
   cache_level_t *level;
-  apr_size_t size = item_size + to_find->entry_key.key_len;
+  apr_size_t size;
 
   /* first, look for a previous entry for the given key */
   entry_t *entry = find_entry(cache, group_index, to_find, FALSE);
 
+  /* Quick size check to make sure arithmetic will work further down
+   * the road. */
+  if (   cache->max_entry_size >= item_size
+      && cache->max_entry_size - item_size >= to_find->entry_key.key_len)
+    {
+      size = item_size + to_find->entry_key.key_len;
+    }
+  else
+    {
+      /* The combination of serialized ITEM and KEY does not fit, so the
+       * insertion attempt will fail and simply remove any old entry
+       * if that exists. */
+      buffer = NULL;
+    }
+
   /* if there is an old version of that entry and the new data fits into
    * the old spot, just re-use that space. */
-  if (entry && ALIGN_VALUE(entry->size) >= size && buffer)
+  if (entry && buffer && ALIGN_VALUE(entry->size) >= size)
     {
       /* Careful! We need to cast SIZE to the full width of CACHE->DATA_USED
        * lest we run into trouble with 32 bit underflow *not* treated as a