Posted to commits@subversion.apache.org by st...@apache.org on 2015/08/06 14:54:36 UTC

svn commit: r1694489 [2/2] - in /subversion/trunk: ./ subversion/include/private/ subversion/libsvn_fs_fs/ subversion/libsvn_fs_x/ subversion/libsvn_subr/ subversion/tests/libsvn_subr/

Modified: subversion/trunk/subversion/libsvn_subr/cache-membuffer.c
URL: http://svn.apache.org/viewvc/subversion/trunk/subversion/libsvn_subr/cache-membuffer.c?rev=1694489&r1=1694488&r2=1694489&view=diff
==============================================================================
--- subversion/trunk/subversion/libsvn_subr/cache-membuffer.c (original)
+++ subversion/trunk/subversion/libsvn_subr/cache-membuffer.c Thu Aug  6 12:54:36 2015
@@ -28,12 +28,14 @@
 #include "svn_pools.h"
 #include "svn_checksum.h"
 #include "svn_private_config.h"
+#include "svn_hash.h"
 #include "svn_string.h"
 #include "svn_sorts.h"  /* get the MIN macro */
 
 #include "private/svn_atomic.h"
 #include "private/svn_dep_compat.h"
 #include "private/svn_mutex.h"
+#include "private/svn_subr_private.h"
 #include "private/svn_string_private.h"
 
 #include "cache.h"
@@ -117,6 +119,12 @@
  * key length stored in the entry acts as an additional offset to find the
  * actual item.
  *
+ * Most keys are 16 bytes or less.  We use the prefix indexes returned by
+ * a prefix_pool_t instance to uniquely identify the prefix in that case.
+ * The combination of prefix index and the key stored in the fingerprint
+ * is then unique, too, and can never conflict.  No full key construction,
+ * storage or comparison is needed in that case.
+ *
  * All access to the cached data needs to be serialized. Because we want
  * to scale well despite that bottleneck, we simply segment the cache into
  * a number of independent caches (segments). Items will be multiplexed based
@@ -189,6 +197,10 @@
  * entries with the same entry key. However unlikely, though, two different
  * full keys (see full_key_t) may have the same entry key.  That is a
  * collision and at most one of them can be stored in the cache at any time.
+ *
+ * If the prefix is shared, which implies that the variable key part is no
+ * longer than 16 bytes, then there is a 1:1 mapping between full key and
+ * entry key.
  */
 typedef struct entry_key_t
 {
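
For a sense of scale, the 128-bit fingerprint makes accidental entry key
collisions between different full keys vanishingly rare: by the birthday
bound, even 2^32 cached items collide with probability on the order of
(2^32)^2 / 2^129 = 2^-65.  A collision therefore costs at most an
eviction, never corruption, because full keys are compared where they
are stored.
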
@@ -196,24 +208,182 @@ typedef struct entry_key_t
   apr_uint64_t fingerprint[2];
 
   /* Length of the full key.  This value is aligned to ITEM_ALIGNMENT to
-   * make sure the subsequent item content is properly aligned. */
+   * make sure the subsequent item content is properly aligned.  If 0,
+   * PREFIX_IDX is implied to be != NO_INDEX. */
   apr_size_t key_len;
+
+  /* Unique index of the shared key prefix, i.e. its index within the
+   * prefix pool (see prefix_pool_t).  NO_INDEX if the key prefix is not
+   * shared, otherwise KEY_LEN==0 is implied. */
+  apr_uint32_t prefix_idx;
 } entry_key_t;
 
 /* A full key, i.e. the combination of the cache's key prefix with some
  * dynamic part appended to it.  It also contains its ENTRY_KEY.
+ *
+ * If the ENTRY_KEY has a 1:1 mapping to the FULL_KEY, then the latter
+ * will be empty and remain unused.
  */
 typedef struct full_key_t
 {
   /* Reduced form identifying the cache entry (if such an entry exists). */
   entry_key_t entry_key;
 
-  /* This contains the full combination.  Note that the SIZE element may
-   * be larger than ENTRY_KEY.KEY_LEN, but only the latter determines the
-   * valid key size. */
+  /* If ENTRY_KEY is not a 1:1 mapping of the prefix + dynamic key
+   * combination,  then this contains the full combination.  Note that the
+   * SIZE element may be larger than ENTRY_KEY.KEY_LEN, but only the latter
+   * determines the valid key size. */
   svn_membuf_t full_key;
 } full_key_t;
 
+/* A limited capacity, thread-safe pool of unique C strings.  Operations on
+ * this data structure are defined by prefix_pool_* functions.  The only
+ * "public" member is VALUES (r/o access only).
+ */
+typedef struct prefix_pool_t
+{
+  /* Map C string to a pointer into VALUES with the same contents. */
+  apr_hash_t *map;
+
+  /* Pointer to an array of strings. These are the contents of this pool
+   * and each one of them is referenced by MAP.  Valid indexes are 0 to
+   * VALUES_USED - 1.  May be NULL if VALUES_MAX is 0. */
+  const char **values;
+
+  /* Maximum number of entries that VALUES may have. */
+  apr_uint32_t values_max;
+
+  /* Number of used entries in VALUES.  Never exceeds VALUES_MAX. */
+  apr_uint32_t values_used;
+
+  /* Maximum number of bytes to allocate. */
+  apr_size_t bytes_max;
+
+  /* Number of bytes currently allocated.  Should not exceed BYTES_MAX but
+   * the implementation may slightly overshoot it because the per-entry
+   * overhead is only estimated. */
+  apr_size_t bytes_used;
+
+  /* The serialization object. */
+  svn_mutex__t *mutex;
+} prefix_pool_t;
+
+/* Set *PREFIX_POOL to a new instance that tries to limit allocation to
+ * BYTES_MAX bytes.  If MUTEX_REQUIRED is set and multi-threading is
+ * supported, serialize all access to the new instance.  Allocate the
+ * object from *RESULT_POOL. */
+static svn_error_t *
+prefix_pool_create(prefix_pool_t **prefix_pool,
+                   apr_size_t bytes_max,
+                   svn_boolean_t mutex_required,
+                   apr_pool_t *result_pool)
+{
+  enum
+    {
+      /* With 56 bytes of overhead under 64 bits, we will probably never get
+       * substantially below this.  If we accidentally do, we will simply
+       * run out of entries in the VALUES array before running out of
+       * allocated memory. */
+      ESTIMATED_BYTES_PER_ENTRY = 120
+    };
+
+  /* Number of entries we are going to support. */
+  apr_size_t capacity = MIN(APR_UINT32_MAX,
+                            bytes_max / ESTIMATED_BYTES_PER_ENTRY);
+
+  /* Construct the result struct. */
+  prefix_pool_t *result = apr_pcalloc(result_pool, sizeof(*result));
+  result->map = svn_hash__make(result_pool);
+
+  result->values = capacity
+                 ? apr_pcalloc(result_pool, capacity * sizeof(const char *))
+                 : NULL;
+  result->values_max = (apr_uint32_t)capacity;
+  result->values_used = 0;
+
+  result->bytes_max = bytes_max;
+  result->bytes_used = capacity * sizeof(svn_membuf_t);
+
+  SVN_ERR(svn_mutex__init(&result->mutex, mutex_required, result_pool));
+
+  /* Done. */
+  *prefix_pool = result;
+  return SVN_NO_ERROR;
+}
+
+/* Set *PREFIX_IDX to the offset in PREFIX_POOL->VALUES that contains the
+ * value PREFIX.  If none exists, auto-insert it.  If we can't due to
+ * capacity exhaustion, set *PREFIX_IDX to NO_INDEX.
+ * To be called by prefix_pool_get() only. */
+static svn_error_t *
+prefix_pool_get_internal(apr_uint32_t *prefix_idx,
+                         prefix_pool_t *prefix_pool,
+                         const char *prefix)
+{
+  enum
+    {
+      /* Size of a hash entry plus (max.) APR alignment loss.
+       *
+       * This may be slightly off if e.g. APR changes its internal data
+       * structures but that will translate into just a few percent (~10%)
+       * over-allocation.  Memory consumption will still be capped.
+       */
+      OVERHEAD = 40 + 8
+    };
+
+  const char **value;
+  apr_size_t prefix_len = strlen(prefix);
+  apr_size_t bytes_needed;
+  apr_pool_t *pool;
+
+  /* Lookup.  If we already know that prefix, return its index. */
+  value = apr_hash_get(prefix_pool->map, prefix, prefix_len);
+  if (value != NULL)
+    {
+      *prefix_idx = value - prefix_pool->values;
+      return SVN_NO_ERROR;
+    }
+
+  /* Capacity checks. */
+  if (prefix_pool->values_used == prefix_pool->values_max)
+    {
+      *prefix_idx = NO_INDEX;
+      return SVN_NO_ERROR;
+    }
+
+  bytes_needed = prefix_len + 1 + OVERHEAD;
+  if (prefix_pool->bytes_used + bytes_needed > prefix_pool->bytes_max)
+    {
+      *prefix_idx = NO_INDEX;
+      return SVN_NO_ERROR;
+    }
+
+  /* Add new entry. */
+  pool = apr_hash_pool_get(prefix_pool->map);
+
+  value = &prefix_pool->values[prefix_pool->values_used];
+  *value = apr_pstrndup(pool, prefix, prefix_len + 1);
+  apr_hash_set(prefix_pool->map, *value, prefix_len, value);
+
+  *prefix_idx = prefix_pool->values_used;
+  ++prefix_pool->values_used;
+  prefix_pool->bytes_used += bytes_needed;
+
+  return SVN_NO_ERROR;
+}
+
+/* Thread-safe wrapper around prefix_pool_get_internal. */
+static svn_error_t *
+prefix_pool_get(apr_uint32_t *prefix_idx,
+                prefix_pool_t *prefix_pool,
+                const char *prefix)
+{
+  SVN_MUTEX__WITH_LOCK(prefix_pool->mutex,
+                       prefix_pool_get_internal(prefix_idx, prefix_pool,
+                                                prefix));
+
+  return SVN_NO_ERROR;
+}
+
 /* Debugging / corruption detection support.
 * If you define this macro, the getter functions will perform expensive
  * checks on the item data, requested keys and entry types. If there is
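
To illustrate the new prefix pool API in isolation, here is a minimal,
hypothetical usage sketch (the membuffer variable and the prefix string
are made-up names; the real call site is in
svn_cache__create_membuffer_cache() further down):

    apr_uint32_t prefix_idx;

    /* Look up the prefix, auto-inserting it on first use. */
    SVN_ERR(prefix_pool_get(&prefix_idx, membuffer->prefix_pool,
                            "fsfs:dag:/some/repo"));

    if (prefix_idx == NO_INDEX)
      {
        /* Pool capacity exhausted: this cache instance must fall back
         * to constructing, storing and comparing full keys. */
      }
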
@@ -263,13 +433,12 @@ typedef struct entry_tag_t
 /* Initialize all members of TAG except for the content hash.
  */
 static svn_error_t *store_key_part(entry_tag_t *tag,
-                                   const full_key_t *prefix_key,
+                                   const char *prefix,
                                    const void *key,
                                    apr_size_t key_len,
-                                   apr_pool_t *pool)
+                                   apr_pool_t *scratch_pool)
 {
   svn_checksum_t *checksum;
-  const char *prefix = prefix_key->full_key.data;
   apr_size_t prefix_len = strlen(prefix);
 
   if (prefix_len > sizeof(tag->prefix_tail))
@@ -280,12 +449,16 @@ static svn_error_t *store_key_part(entry
 
   SVN_ERR(svn_checksum(&checksum,
                        svn_checksum_md5,
+                       prefix,
+                       strlen(prefix),
+                       scratch_pool));
+  memcpy(tag->prefix_hash, checksum->digest, sizeof(tag->prefix_hash));
+
+  SVN_ERR(svn_checksum(&checksum,
+                       svn_checksum_md5,
                        key,
                        key_len,
-                       pool));
-
-  memcpy(tag->prefix_hash, prefix_key->entry_key.fingerprint,
-         sizeof(tag->prefix_hash));
+                       scratch_pool));
   memcpy(tag->key_hash, checksum->digest, sizeof(tag->key_hash));
 
   memset(tag->prefix_tail, 0, sizeof(tag->key_hash));
@@ -346,7 +519,7 @@ static svn_error_t* assert_equal_tags(co
   entry_tag_t *tag = &_tag;                                      \
   if (key)                                                       \
     SVN_ERR(store_key_part(tag,                                  \
-                           &cache->prefix,                       \
+                           get_prefix_key(cache),                \
                            key,                                  \
                            cache->key_len == APR_HASH_KEY_STRING \
                                ? strlen((const char *) key)      \
@@ -521,6 +694,12 @@ struct svn_membuffer_t
      and that all segments must / will report the same values here. */
   apr_uint32_t segment_count;
 
+  /* Collection of prefixes shared among all instances accessing the
+   * same membuffer cache backend.  If a prefix is contained in this
+   * pool then all cache instances using an equal prefix must actually
+   * use the one stored in this pool. */
+  prefix_pool_t *prefix_pool;
+
   /* The dictionary, GROUP_SIZE * (group_count + spare_group_count)
    * entries long.  Never NULL.
    */
@@ -1186,6 +1365,7 @@ entry_keys_match(const entry_key_t *lhs,
 {
   return (lhs->fingerprint[0] == rhs->fingerprint[0])
       && (lhs->fingerprint[1] == rhs->fingerprint[1])
+      && (lhs->prefix_idx == rhs->prefix_idx)
       && (lhs->key_len == rhs->key_len);
 }
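
Since PREFIX_IDX now takes part in the comparison, two entries that
happen to share a fingerprint but come from caches with different pooled
prefixes can never be treated as equal.  A made-up illustration:

    /* Same fingerprint and key_len, different prefix pool indexes. */
    entry_key_t lhs = { { 0x1234, 0x5678 }, 0, 0 };  /* prefix #0 */
    entry_key_t rhs = { { 0x1234, 0x5678 }, 0, 1 };  /* prefix #1 */

    /* entry_keys_match(&lhs, &rhs) yields FALSE. */
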
 
@@ -1248,7 +1428,8 @@ find_entry(svn_membuffer_t *cache,
            /* If we want to preserve it, check that it is actually a match. */
             if (!find_empty)
               {
-                /* If there is no full key to compare, we are done. */
+                /* If the full key is fully defined by prefix_idx & the
+                 * mangled key, we are done. */
                 if (!entry->key.key_len)
                   return entry;
 
@@ -1675,6 +1856,7 @@ svn_cache__membuffer_cache_create(svn_me
                                   apr_pool_t *pool)
 {
   svn_membuffer_t *c;
+  prefix_pool_t *prefix_pool;
 
   apr_uint32_t seg;
   apr_uint32_t group_count;
@@ -1684,6 +1866,12 @@ svn_cache__membuffer_cache_create(svn_me
   apr_uint64_t data_size;
   apr_uint64_t max_entry_size;
 
+  /* Allocate 1% of the cache capacity to the prefix string pool.
+   */
+  SVN_ERR(prefix_pool_create(&prefix_pool, total_size / 100, thread_safe,
+                             pool));
+  total_size -= total_size / 100;
+
   /* Limit the total size (only relevant if we can address > 4GB)
    */
 #if APR_SIZEOF_VOIDP > 4
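
To make the sizing concrete (illustration only): for a 64 MB membuffer,
total_size / 100 hands the prefix pool 671,088 bytes, and with
ESTIMATED_BYTES_PER_ENTRY = 120 that caps it at roughly 5,592 distinct
prefixes, far more than the number of cache instances a server creates.
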
@@ -1794,14 +1982,18 @@ svn_cache__membuffer_cache_create(svn_me
       /* allocate buffers and initialize cache members
        */
       c[seg].segment_count = (apr_uint32_t)segment_count;
+      c[seg].prefix_pool = prefix_pool;
 
       c[seg].group_count = main_group_count;
       c[seg].spare_group_count = spare_group_count;
       c[seg].first_spare_group = NO_INDEX;
       c[seg].max_spare_used = 0;
 
-      c[seg].directory = apr_pcalloc(pool,
-                                     group_count * sizeof(entry_group_t));
+      /* Allocate but don't clear / zero the directory because it would add
+         significantly to the server start-up time if the caches are large.
+         Group initialization will take care of that instead. */
+      c[seg].directory = apr_palloc(pool,
+                                    group_count * sizeof(entry_group_t));
 
       /* Allocate and initialize directory entries as "not initialized",
          hence "unused" */
@@ -2581,11 +2773,11 @@ typedef struct svn_membuffer_cache_t
    */
   svn_cache__deserialize_func_t deserializer;
 
-  /* Prepend this byte sequence to any key passed to us.
+  /* Prepend this to any key passed to us.
    * This makes our keys different from all keys used by svn_membuffer_cache_t
    * instances that we don't want to share cached data with.
    */
-  full_key_t prefix;
+  entry_key_t prefix;
 
   /* length of the keys that will be passed to us through the
    * svn_cache_t interface. May be APR_HASH_KEY_STRING.
@@ -2604,6 +2796,15 @@ typedef struct svn_membuffer_cache_t
   svn_mutex__t *mutex;
 } svn_membuffer_cache_t;
 
+/* Return the prefix key used by CACHE. */
+static const char *
+get_prefix_key(const svn_membuffer_cache_t *cache)
+{
+  return (cache->prefix.prefix_idx == NO_INDEX
+       ? cache->combined_key.full_key.data
+       : cache->membuffer->prefix_pool->values[cache->prefix.prefix_idx]);
+}
+
 /* Basically calculate a hash value for KEY of length KEY_LEN, combine it
  * with the CACHE->PREFIX and write the result in CACHE->COMBINED_KEY.
  * This could replace combine_key() entirely but we actually use it only
@@ -2616,7 +2817,7 @@ combine_long_key(svn_membuffer_cache_t *
 {
   apr_uint32_t *digest_buffer;
   char *key_copy;
-  apr_size_t prefix_len = cache->prefix.entry_key.key_len;
+  apr_size_t prefix_len = cache->prefix.key_len;
   apr_size_t aligned_key_len;
 
   /* handle variable-length keys */
@@ -2640,9 +2841,9 @@ combine_long_key(svn_membuffer_cache_t *
 
   /* Combine with prefix. */
   cache->combined_key.entry_key.fingerprint[0]
-    ^= cache->prefix.entry_key.fingerprint[0];
+    ^= cache->prefix.fingerprint[0];
   cache->combined_key.entry_key.fingerprint[1]
-    ^= cache->prefix.entry_key.fingerprint[1];
+    ^= cache->prefix.fingerprint[1];
 }
 
 /* Basically calculate a hash value for KEY of length KEY_LEN, combine it
@@ -2653,47 +2854,55 @@ combine_key(svn_membuffer_cache_t *cache
             const void *key,
             apr_ssize_t key_len)
 {
-  /* short, fixed-size keys are the most common case */
-  if (key_len != APR_HASH_KEY_STRING && key_len <= 16)
-    {
-      const apr_size_t prefix_len = cache->prefix.entry_key.key_len;
+  /* copy of *key, padded with 0 */
+  apr_uint64_t data[2];
 
-      /* Copy of *key, padded with 0.
-       * We put it just behind the prefix already copied into the COMBINED_KEY.
-       * The buffer space has been allocated when the cache was created. */
-      apr_uint64_t *data = (void *)((char *)cache->combined_key.full_key.data + 
-                                    prefix_len);
-      assert(prefix_len <= cache->combined_key.full_key.size - 16);
-      cache->combined_key.entry_key.key_len = prefix_len + 16;
+  /* Do we have to compare full keys? */
+  if (cache->prefix.prefix_idx == NO_INDEX)
+    {
+      combine_long_key(cache, key, key_len);
+      return;
+    }
 
-      data[0] = 0;
+  /* short, fixed-size keys are the most common case */
+  if (key_len == 16)
+    {
+      memcpy(data, key, 16);
+    }
+  else if (key_len == 8)
+    {
+      memcpy(data, key, 8);
       data[1] = 0;
-      memcpy(data, key, key_len);
-
-      /* Scramble key DATA to spread the key space more evenly across the
-       * cache segments and entry buckets.  All of this shall be reversible
-       * to prevent key collisions.  So, we limit ourselves to xor and
-       * permutations.
-       *
-       * As long as we compare the full combined key, the additional
-       * fingerprint collisions introduced by a non-reversible scramble
-       * would simply reduce the cache effectiveness.
-       */
-      data[1] = (data[1] << 27) | (data[1] >> 37);
-      data[1] ^= data[0] & 0xffff;
-      data[0] ^= data[1] & APR_UINT64_C(0xffffffffffff0000);
-
-      /* combine with this cache's namespace */
-      cache->combined_key.entry_key.fingerprint[0]
-        = data[0] ^ cache->prefix.entry_key.fingerprint[0];
-      cache->combined_key.entry_key.fingerprint[1]
-        = data[1] ^ cache->prefix.entry_key.fingerprint[1];
     }
   else
     {
-      /* longer or variably sized keys */
-      combine_long_key(cache, key, key_len);
+      assert(key_len != APR_HASH_KEY_STRING && key_len < 16);
+      data[0] = 0;
+      data[1] = 0;
+      memcpy(data, key, key_len);
     }
+
+  /* Scramble key DATA to spread the key space more evenly across the
+   * cache segments and entry buckets.  All of this shall be reversible
+   * to prevent key collisions.  So, we limit ourselves to xor and
+   * permutations.
+   *
+   * Since the entry key must preserve the full key (prefix and KEY),
+   * the scramble must not introduce KEY collisions.
+   */
+  data[1] = (data[1] << 27) | (data[1] >> 37);
+  data[1] ^= data[0] & 0xffff;
+  data[0] ^= data[1] & APR_UINT64_C(0xffffffffffff0000);
+
+  /* Combine with this cache's prefix.  This is reversible because the
+   * prefix is known through the respective entry_key element.  So,
+   * knowing entry_key.prefix_idx, we can still reconstruct KEY (and the
+   * prefix key).
+   */
+  cache->combined_key.entry_key.fingerprint[0]
+    = data[0] ^ cache->prefix.fingerprint[0];
+  cache->combined_key.entry_key.fingerprint[1]
+    = data[1] ^ cache->prefix.fingerprint[1];
 }
 
 /* Implement svn_cache__vtable_t.get (not thread-safe)
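
The scramble really is reversible.  For illustration, a hypothetical
inverse (the cache itself never needs to run this; after xor-ing the
prefix fingerprint back out of the entry key, applying the steps in
reverse order recovers the original KEY):

    /* Undo combine_key()'s scramble: inverse steps in reverse order. */
    data[0] ^= data[1] & APR_UINT64_C(0xffffffffffff0000);
    data[1] ^= data[0] & 0xffff;
    data[1] = (data[1] >> 27) | (data[1] << 37);

Invertibility is what guarantees that distinct (prefix, KEY) pairs yield
distinct fingerprints, so the full-key comparison can be skipped whenever
PREFIX_IDX != NO_INDEX.
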
@@ -2940,7 +3149,7 @@ svn_membuffer_cache_get_info(void *cache
 
   /* cache front-end specific data */
 
-  info->id = apr_pstrdup(result_pool, cache->prefix.full_key.data);
+  info->id = apr_pstrdup(result_pool, get_prefix_key(cache));
 
   /* collect info from shared cache back-end */
 
@@ -3129,6 +3338,7 @@ svn_cache__create_membuffer_cache(svn_ca
                                   const char *prefix,
                                   apr_uint32_t priority,
                                   svn_boolean_t thread_safe,
+                                  svn_boolean_t short_lived,
                                   apr_pool_t *result_pool,
                                   apr_pool_t *scratch_pool)
 {
@@ -3159,28 +3369,50 @@ svn_cache__create_membuffer_cache(svn_ca
   prefix_orig_len = strlen(prefix) + 1;
   prefix_len = ALIGN_VALUE(prefix_orig_len);
 
-  svn_membuf__create(&cache->prefix.full_key, prefix_len, result_pool);
-  memcpy((char *)cache->prefix.full_key.data, prefix, prefix_orig_len);
-  memset((char *)cache->prefix.full_key.data + prefix_orig_len, 0,
-         prefix_len - prefix_orig_len);
-
   /* Construct the folded prefix key. */
   SVN_ERR(svn_checksum(&checksum,
                        svn_checksum_md5,
                        prefix,
                        strlen(prefix),
                        scratch_pool));
-  memcpy(cache->prefix.entry_key.fingerprint, checksum->digest,
-         sizeof(cache->prefix.entry_key.fingerprint));
-  cache->prefix.entry_key.key_len = prefix_len;
-
-  /* Initialize the combined key. Pre-allocate some extra room in the full
-   * key such that we probably don't need to re-alloc. */
-  cache->combined_key.entry_key = cache->prefix.entry_key;
-  svn_membuf__create(&cache->combined_key.full_key, prefix_len + 200,
-                     result_pool);
-  memcpy(cache->combined_key.full_key.data, cache->prefix.full_key.data,
-         prefix_len);
+  memcpy(cache->prefix.fingerprint, checksum->digest,
+         sizeof(cache->prefix.fingerprint));
+  cache->prefix.key_len = prefix_len;
+
+  /* Fixed-length keys of up to 16 bytes may be handled without storing the
+   * full key separately for each item. */
+  if (   (klen != APR_HASH_KEY_STRING)
+      && (klen <= sizeof(cache->combined_key.entry_key.fingerprint))
+      && !short_lived)
+    SVN_ERR(prefix_pool_get(&cache->prefix.prefix_idx,
+                            membuffer->prefix_pool,
+                            prefix));
+  else
+    cache->prefix.prefix_idx = NO_INDEX;
+
+  /* If key combining is not guaranteed to produce unique results, we have
+   * to handle full keys.  Otherwise, leave it NULL. */
+  if (cache->prefix.prefix_idx == NO_INDEX)
+    {
+      /* Initialize the combined key. Pre-allocate some extra room in the
+       * full key such that we probably don't need to re-alloc. */
+      cache->combined_key.entry_key = cache->prefix;
+      svn_membuf__create(&cache->combined_key.full_key, prefix_len + 200,
+                         result_pool);
+      memcpy((char *)cache->combined_key.full_key.data, prefix,
+             prefix_orig_len);
+      memset((char *)cache->combined_key.full_key.data + prefix_orig_len, 0,
+             prefix_len - prefix_orig_len);
+    }
+  else
+    {
+      /* Initialize the combined key.  We will never have the full combined
+       * key, so leave it NULL and set its length to 0 to prevent access to
+       * it.  Keep the fingerprint 0 as well because it will always be
+       * set anew by combine_key(). */
+      cache->combined_key.entry_key.prefix_idx = cache->prefix.prefix_idx;
+      cache->combined_key.entry_key.key_len = 0;
+    }
 
   /* initialize the generic cache wrapper
    */
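
Existing callers must be updated for the new SHORT_LIVED parameter, as
the test changes below show.  A minimal caller sketch (serializer names,
prefix and pools are placeholders borrowed from the tests):

    svn_cache__t *cache;
    SVN_ERR(svn_cache__create_membuffer_cache(
              &cache, membuffer, serialize_revnum, deserialize_revnum,
              8 /* klen */, "my-cache:",
              SVN_CACHE__MEMBUFFER_DEFAULT_PRIORITY,
              FALSE /* thread_safe */,
              FALSE /* short_lived: eligible for a prefix pool entry */,
              pool, pool));

Passing TRUE for SHORT_LIVED keeps transient caches from permanently
claiming one of the limited prefix pool slots.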

Modified: subversion/trunk/subversion/tests/libsvn_subr/cache-test.c
URL: http://svn.apache.org/viewvc/subversion/trunk/subversion/tests/libsvn_subr/cache-test.c?rev=1694489&r1=1694488&r2=1694489&view=diff
==============================================================================
--- subversion/trunk/subversion/tests/libsvn_subr/cache-test.c (original)
+++ subversion/trunk/subversion/tests/libsvn_subr/cache-test.c Thu Aug  6 12:54:36 2015
@@ -221,6 +221,7 @@ test_membuffer_cache_basic(apr_pool_t *p
                                             "cache:",
                                             SVN_CACHE__MEMBUFFER_DEFAULT_PRIORITY,
                                             FALSE,
+                                            FALSE,
                                             pool, pool));
 
   return basic_cache_test(cache, FALSE, pool);
@@ -278,6 +279,7 @@ test_membuffer_serializer_error_handling
                                             "cache:",
                                             SVN_CACHE__MEMBUFFER_DEFAULT_PRIORITY,
                                             FALSE,
+                                            FALSE,
                                             pool, pool));
 
   SVN_ERR(svn_cache__set(cache, "twenty", &twenty, pool));
@@ -307,6 +309,7 @@ test_membuffer_serializer_error_handling
                                             "cache:",
                                             SVN_CACHE__MEMBUFFER_DEFAULT_PRIORITY,
                                             FALSE,
+                                            FALSE,
                                             pool, pool));
 
   /* Store one entry in cache. */
@@ -393,6 +396,7 @@ test_membuffer_cache_clearing(apr_pool_t
                                             "cache:",
                                             SVN_CACHE__MEMBUFFER_DEFAULT_PRIORITY,
                                             FALSE,
+                                            FALSE,
                                             pool, pool));
 
   /* Initially, the cache is empty. */
@@ -440,6 +444,51 @@ test_membuffer_cache_clearing(apr_pool_t
   return SVN_NO_ERROR;
 }
 
+/* Implements svn_iter_apr_hash_cb_t. */
+static svn_error_t *
+null_cache_iter_func(void *baton,
+                     const void *key,
+                     apr_ssize_t klen,
+                     void *val,
+                     apr_pool_t *pool)
+{
+  /* shall never be called */
+  return svn_error_create(SVN_ERR_TEST_FAILED, NULL, "should not be called");
+}
+
+static svn_error_t *
+test_null_cache(apr_pool_t *pool)
+{
+  svn_boolean_t found, done;
+  int *data = NULL;
+  svn_cache__info_t info;
+
+  svn_cache__t *cache;
+  SVN_ERR(svn_cache__create_null(&cache, "test-dummy", pool));
+
+  /* Can't cache anything. */
+  SVN_TEST_ASSERT(svn_cache__is_cachable(cache, 0) == FALSE);
+  SVN_TEST_ASSERT(svn_cache__is_cachable(cache, 1) == FALSE);
+
+  /* No point in adding data. */
+  SVN_ERR(svn_cache__set(cache, "data", &data, pool));
+  SVN_ERR(svn_cache__get((void **)&data, &found, cache, "data", pool));
+  SVN_TEST_ASSERT(found == FALSE);
+
+  SVN_ERR(svn_cache__has_key(&found, cache, "data", pool));
+  SVN_TEST_ASSERT(found == FALSE);
+
+  /* Iteration "works" but is a no-op. */
+  SVN_ERR(svn_cache__iter(&done, cache, null_cache_iter_func, NULL, pool));
+  SVN_TEST_ASSERT(done);
+
+  /* It shall know its name. */
+  SVN_ERR(svn_cache__get_info(cache, &info, TRUE, pool));
+  SVN_TEST_STRING_ASSERT(info.id, "test-dummy");
+
+  return SVN_NO_ERROR;
+}
+
 static svn_error_t *
 test_membuffer_unaligned_string_keys(apr_pool_t *pool)
 {
@@ -462,7 +511,7 @@ test_membuffer_unaligned_string_keys(apr
   SVN_ERR(svn_cache__create_membuffer_cache(
             &cache, membuffer, serialize_revnum, deserialize_revnum,
             APR_HASH_KEY_STRING, unaligned_prefix,
-            SVN_CACHE__MEMBUFFER_DEFAULT_PRIORITY, FALSE,
+            SVN_CACHE__MEMBUFFER_DEFAULT_PRIORITY, FALSE, FALSE,
             pool, pool));
 
   SVN_ERR(svn_cache__set(cache, unaligned_key, &fifty, pool));
@@ -513,7 +562,7 @@ test_membuffer_unaligned_fixed_keys(apr_
             &cache, membuffer, serialize_revnum, deserialize_revnum,
             8 /* klen*/,
             unaligned_prefix,
-            SVN_CACHE__MEMBUFFER_DEFAULT_PRIORITY, FALSE,
+            SVN_CACHE__MEMBUFFER_DEFAULT_PRIORITY, FALSE, FALSE,
             pool, pool));
 
   SVN_ERR(svn_cache__set(cache, unaligned_key, &fifty, pool));
@@ -541,6 +590,7 @@ test_membuffer_unaligned_fixed_keys(apr_
   return SVN_NO_ERROR;
 }
 
+
 /* The test table.  */
 
 static int max_threads = 1;
@@ -560,6 +610,8 @@ static struct svn_test_descriptor_t test
                    "test for error handling in membuffer svn_cache"),
     SVN_TEST_PASS2(test_membuffer_cache_clearing,
                    "test clearing a membuffer svn_cache"),
+    SVN_TEST_PASS2(test_null_cache,
+                   "basic null svn_cache test"),
     SVN_TEST_PASS2(test_membuffer_unaligned_string_keys,
                    "test membuffer cache with unaligned string keys"),
     SVN_TEST_PASS2(test_membuffer_unaligned_fixed_keys,