Posted to commits@subversion.apache.org by rh...@apache.org on 2015/11/30 11:24:23 UTC

svn commit: r1717223 [31/50] - in /subversion/branches/ra-git: ./ build/ build/ac-macros/ build/generator/ build/generator/templates/ contrib/hook-scripts/ notes/ notes/api-errata/1.9/ notes/move-tracking/ subversion/ subversion/bindings/ctypes-python/...

Modified: subversion/branches/ra-git/subversion/libsvn_subr/base64.c
URL: http://svn.apache.org/viewvc/subversion/branches/ra-git/subversion/libsvn_subr/base64.c?rev=1717223&r1=1717222&r2=1717223&view=diff
==============================================================================
--- subversion/branches/ra-git/subversion/libsvn_subr/base64.c (original)
+++ subversion/branches/ra-git/subversion/libsvn_subr/base64.c Mon Nov 30 10:24:16 2015
@@ -58,6 +58,7 @@ struct encode_baton {
   unsigned char buf[3];         /* Bytes waiting to be encoded */
   size_t buflen;                /* Number of bytes waiting */
   size_t linelen;               /* Bytes output so far on this line */
+  svn_boolean_t break_lines;
   apr_pool_t *scratch_pool;
 };
 
@@ -214,7 +215,8 @@ encode_data(void *baton, const char *dat
   svn_error_t *err = SVN_NO_ERROR;
 
   /* Encode this block of data and write it out.  */
-  encode_bytes(encoded, data, *len, eb->buf, &eb->buflen, &eb->linelen, TRUE);
+  encode_bytes(encoded, data, *len, eb->buf, &eb->buflen, &eb->linelen,
+               eb->break_lines);
   enclen = encoded->len;
   if (enclen != 0)
     err = svn_stream_write(eb->output, encoded->data, &enclen);
@@ -233,7 +235,8 @@ finish_encoding_data(void *baton)
   svn_error_t *err = SVN_NO_ERROR;
 
   /* Encode a partial group at the end if necessary, and write it out.  */
-  encode_partial_group(encoded, eb->buf, eb->buflen, eb->linelen, TRUE);
+  encode_partial_group(encoded, eb->buf, eb->buflen, eb->linelen,
+                       eb->break_lines);
   enclen = encoded->len;
   if (enclen != 0)
     err = svn_stream_write(eb->output, encoded->data, &enclen);
@@ -247,7 +250,9 @@ finish_encoding_data(void *baton)
 
 
 svn_stream_t *
-svn_base64_encode(svn_stream_t *output, apr_pool_t *pool)
+svn_base64_encode2(svn_stream_t *output,
+                   svn_boolean_t break_lines,
+                   apr_pool_t *pool)
 {
   struct encode_baton *eb = apr_palloc(pool, sizeof(*eb));
   svn_stream_t *stream;
@@ -255,6 +260,7 @@ svn_base64_encode(svn_stream_t *output,
   eb->output = output;
   eb->buflen = 0;
   eb->linelen = 0;
+  eb->break_lines = break_lines;
   eb->scratch_pool = svn_pool_create(pool);
   stream = svn_stream_create(eb, pool);
   svn_stream_set_write(stream, encode_data);
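
A minimal usage sketch for the new API (illustrative only: encode_blob is a
hypothetical caller, the svn_stringbuf/svn_stream helpers are the standard
svn_subr ones, and error handling is abbreviated):

    /* Base64-encode a buffer into a stringbuf without inserting line
     * breaks -- the capability added by svn_base64_encode2() above. */
    static svn_error_t *
    encode_blob(svn_stringbuf_t **encoded_p,
                const char *data,
                apr_size_t len,
                apr_pool_t *pool)
    {
      svn_stringbuf_t *encoded = svn_stringbuf_create_empty(pool);
      svn_stream_t *target = svn_stream_from_stringbuf(encoded, pool);
      svn_stream_t *b64 = svn_base64_encode2(target, FALSE /* break_lines */,
                                             pool);

      SVN_ERR(svn_stream_write(b64, data, &len));
      SVN_ERR(svn_stream_close(b64));   /* flushes the final partial group */

      *encoded_p = encoded;
      return SVN_NO_ERROR;
    }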

Modified: subversion/branches/ra-git/subversion/libsvn_subr/cache-inprocess.c
URL: http://svn.apache.org/viewvc/subversion/branches/ra-git/subversion/libsvn_subr/cache-inprocess.c?rev=1717223&r1=1717222&r2=1717223&view=diff
==============================================================================
--- subversion/branches/ra-git/subversion/libsvn_subr/cache-inprocess.c (original)
+++ subversion/branches/ra-git/subversion/libsvn_subr/cache-inprocess.c Mon Nov 30 10:24:16 2015
@@ -23,8 +23,6 @@
 
 #include <assert.h>
 
-#include <apr_thread_mutex.h>
-
 #include "svn_pools.h"
 
 #include "svn_private_config.h"

Modified: subversion/branches/ra-git/subversion/libsvn_subr/cache-membuffer.c
URL: http://svn.apache.org/viewvc/subversion/branches/ra-git/subversion/libsvn_subr/cache-membuffer.c?rev=1717223&r1=1717222&r2=1717223&view=diff
==============================================================================
--- subversion/branches/ra-git/subversion/libsvn_subr/cache-membuffer.c (original)
+++ subversion/branches/ra-git/subversion/libsvn_subr/cache-membuffer.c Mon Nov 30 10:24:16 2015
@@ -28,13 +28,18 @@
 #include "svn_pools.h"
 #include "svn_checksum.h"
 #include "svn_private_config.h"
-#include "cache.h"
+#include "svn_hash.h"
 #include "svn_string.h"
 #include "svn_sorts.h"  /* get the MIN macro */
+
 #include "private/svn_atomic.h"
 #include "private/svn_dep_compat.h"
 #include "private/svn_mutex.h"
-#include "private/svn_pseudo_md5.h"
+#include "private/svn_subr_private.h"
+#include "private/svn_string_private.h"
+
+#include "cache.h"
+#include "fnv1a.h"
 
 /*
  * This svn_cache__t implementation actually consists of two parts:
@@ -45,8 +50,9 @@
  * A membuffer cache consists of two parts:
  *
  * 1. A linear data buffer containing cached items in a serialized
- *    representation. There may be arbitrary gaps between entries.
- *    This buffer is sub-devided into (currently two) cache levels.
+ *    representation, prefixed by their full cache keys. There may be
+ *    arbitrary gaps between entries.  This buffer is sub-divided into
+ *    (currently two) cache levels.
  *
  * 2. A directory of cache entries. This is organized similar to CPU
  *    data caches: for every possible key, there is exactly one group
@@ -78,9 +84,10 @@
  * Insertion can occur at only one, sliding position per cache level.  It is
  * marked by its offset in the data buffer and the index of the first used
  * entry at or behind that position.  If this gap is too small to accommodate
- * the new item, the insertion window is extended as described below. The new
- * entry will always be inserted at the bottom end of the window and since
- * the next used entry is known, properly sorted insertion is possible.
+ * the new item (plus its full key), the insertion window is extended as
+ * described below.  The new entry will always be inserted at the bottom end
+ * of the window and since the next used entry is known, properly sorted
+ * insertion is possible.
  *
  * To make the cache perform robustly in a wide range of usage scenarios,
  * L2 uses a randomized variant of LFU (see ensure_data_insertable_l2 for
@@ -104,11 +111,19 @@
  * an already used group to extend it.
  *
  * To limit the entry size and management overhead, not the actual item keys
- * but only their MD5-based hashes will be stored. This is reasonably safe
- * to do since users have only limited control over the full keys, even if
- * these contain folder paths. So, it is very hard to deliberately construct
- * colliding keys. Random checksum collisions can be shown to be extremely
- * unlikely.
+ * but only their hashed "fingerprint" will be stored.  These are reasonably
+ * unique to prevent collisions, so we only need to support up to one entry
+ * per entry key.  To guarantee that there are no conflicts, however, we
+ * store the actual full key immediately in front of the serialized item
+ * data.  That is, the entry offset actually points to the full key and the
+ * key length stored in the entry acts as an additional offset to find the
+ * actual item.
+ *
+ * Most keys are 16 bytes or less.  We use the prefix indexes returned by
+ * a prefix_pool_t instance to uniquely identify the prefix in that case.
+ * The combination of prefix index and key stored in the fingerprint
+ * is then unique, too, and can never conflict.  No full key construction,
+ * storage and comparison is needed in that case.
  *
  * All access to the cached data needs to be serialized. Because we want
  * to scale well despite that bottleneck, we simply segment the cache into
@@ -178,17 +193,199 @@
  */
 #define MAX_ITEM_SIZE ((apr_uint32_t)(0 - ITEM_ALIGNMENT))
 
-/* A 16 byte key type. We use that to identify cache entries.
- * The notation as just two integer values will cause many compilers
- * to create better code.
+/* We use this structure to identify cache entries. There cannot be two
+ * entries with the same entry key.  However unlikely, two different
+ * full keys (see full_key_t) may still have the same entry key.  That is a
+ * collision and at most one of them can be stored in the cache at any time.
+ *
+ * If the prefix is shared, which implies that the variable key part is no
+ * longer than 16 bytes, then there is a 1:1 mapping between full key and
+ * entry key.
+ */
+typedef struct entry_key_t
+{
+  /* 16 byte finger print of the full key. */
+  apr_uint64_t fingerprint[2];
+
+  /* Length of the full key.  This value is aligned to ITEM_ALIGNMENT to
+   * make sure the subsequent item content is properly aligned.  If 0,
+   * PREFIX_IDX is implied to be != NO_INDEX. */
+  apr_size_t key_len;
+
+  /* Unique index of the shared key prefix, i.e. its index within the
+   * prefix pool (see prefix_pool_t).  NO_INDEX if the key prefix is not
+   * shared, otherwise KEY_LEN==0 is implied. */
+  apr_uint32_t prefix_idx;
+} entry_key_t;
+
+/* A full key, i.e. the combination of the cache's key prefix with some
+ * dynamic part appended to it.  It also contains its ENTRY_KEY.
+ *
+ * If the ENTRY_KEY has a 1:1 mapping to the FULL_KEY, then the latter
+ * will be empty and remains unused.
  */
-typedef apr_uint64_t entry_key_t[2];
+typedef struct full_key_t
+{
+  /* Reduced form identifying the cache entry (if such an entry exists). */
+  entry_key_t entry_key;
 
-/* The prefix passed to svn_cache__create_membuffer_cache() effectively
- * defines the type of all items stored by that cache instance. We'll take
- * the last 15 bytes + \0 as plaintext for easy identification by the dev.
+  /* If ENTRY_KEY is not a 1:1 mapping of the prefix + dynamic key
+   * combination,  then this contains the full combination.  Note that the
+   * SIZE element may be larger than ENTRY_KEY.KEY_LEN, but only the latter
+   * determines the valid key size. */
+  svn_membuf_t full_key;
+} full_key_t;
+
+/* A limited capacity, thread-safe pool of unique C strings.  Operations on
+ * this data structure are defined by prefix_pool_* functions.  The only
+ * "public" member is VALUES (r/o access only).
  */
-#define PREFIX_TAIL_LEN 16
+typedef struct prefix_pool_t
+{
+  /* Map C string to a pointer into VALUES with the same contents. */
+  apr_hash_t *map;
+
+  /* Pointer to an array of strings. These are the contents of this pool
+   * and each one of them is referenced by MAP.  Valid indexes are 0 to
+   * VALUES_USED - 1.  May be NULL if VALUES_MAX is 0. */
+  const char **values;
+
+  /* Maximum number of entries that VALUES may hold. */
+  apr_uint32_t values_max;
+
+  /* Number of used entries in VALUES.  Never exceeds VALUES_MAX. */
+  apr_uint32_t values_used;
+
+  /* Maximum number of bytes to allocate. */
+  apr_size_t bytes_max;
+
+  /* Number of bytes currently allocated.  Should not exceed BYTES_MAX but
+   * the implementation may slightly exceed it. */
+  apr_size_t bytes_used;
+
+  /* The serialization object. */
+  svn_mutex__t *mutex;
+} prefix_pool_t;
+
+/* Set *PREFIX_POOL to a new instance that tries to limit allocation to
+ * BYTES_MAX bytes.  If MUTEX_REQUIRED is set and multi-threading is
+ * supported, serialize all access to the new instance.  Allocate the
+ * object from *RESULT_POOL. */
+static svn_error_t *
+prefix_pool_create(prefix_pool_t **prefix_pool,
+                   apr_size_t bytes_max,
+                   svn_boolean_t mutex_required,
+                   apr_pool_t *result_pool)
+{
+  enum
+    {
+      /* With 56 bytes of overhead under 64 bits, we will probably never get
+       * substantially below this.  If we accidentally do, we will simply
+       * run out of entries in the VALUES array before running out of
+       * allocated memory. */
+      ESTIMATED_BYTES_PER_ENTRY = 120,
+    };
+
+  /* Number of entries we are going to support. */
+  apr_size_t capacity = MIN(APR_UINT32_MAX,
+                            bytes_max / ESTIMATED_BYTES_PER_ENTRY);
+
+  /* Construct the result struct. */
+  prefix_pool_t *result = apr_pcalloc(result_pool, sizeof(*result));
+  result->map = svn_hash__make(result_pool);
+
+  result->values = capacity
+                 ? apr_pcalloc(result_pool, capacity * sizeof(const char *))
+                 : NULL;
+  result->values_max = (apr_uint32_t)capacity;
+  result->values_used = 0;
+
+  result->bytes_max = bytes_max;
+  result->bytes_used = capacity * sizeof(svn_membuf_t);
+
+  SVN_ERR(svn_mutex__init(&result->mutex, mutex_required, result_pool));
+
+  /* Done. */
+  *prefix_pool = result;
+  return SVN_NO_ERROR;
+}
+
+/* Set *PREFIX_IDX to the offset in PREFIX_POOL->VALUES that contains the
+ * value PREFIX.  If none exists, auto-insert it.  If we can't due to
+ * capacity exhaustion, set *PREFIX_IDX to NO_INDEX.
+ * To be called by prefix_pool_get() only. */
+static svn_error_t *
+prefix_pool_get_internal(apr_uint32_t *prefix_idx,
+                         prefix_pool_t *prefix_pool,
+                         const char *prefix)
+{
+  enum
+    {
+      /* Size of a hash entry plus (max.) APR alignment loss.
+       *
+       * This may be slightly off if e.g. APR changes its internal data
+       * structures but that will translate into just a few percent (~10%)
+       * over-allocation.  Memory consumption will still be capped.
+       */
+      OVERHEAD = 40 + 8
+    };
+
+  const char **value;
+  apr_size_t prefix_len = strlen(prefix);
+  apr_size_t bytes_needed;
+  apr_pool_t *pool;
+
+  /* Lookup.  If we already know that prefix, return its index. */
+  value = apr_hash_get(prefix_pool->map, prefix, prefix_len);
+  if (value != NULL)
+    {
+      const apr_size_t idx = value - prefix_pool->values;
+      SVN_ERR_ASSERT(idx < prefix_pool->values_used);
+      *prefix_idx = (apr_uint32_t) idx;
+      return SVN_NO_ERROR;
+    }
+
+  /* Capacity checks. */
+  if (prefix_pool->values_used == prefix_pool->values_max)
+    {
+      *prefix_idx = NO_INDEX;
+      return SVN_NO_ERROR;
+    }
+
+  bytes_needed = prefix_len + 1 + OVERHEAD;
+  assert(prefix_pool->bytes_max >= prefix_pool->bytes_used);
+  if (prefix_pool->bytes_max - prefix_pool->bytes_used < bytes_needed)
+    {
+      *prefix_idx = NO_INDEX;
+      return SVN_NO_ERROR;
+    }
+
+  /* Add new entry. */
+  pool = apr_hash_pool_get(prefix_pool->map);
+
+  value = &prefix_pool->values[prefix_pool->values_used];
+  *value = apr_pstrndup(pool, prefix, prefix_len + 1);
+  apr_hash_set(prefix_pool->map, *value, prefix_len, value);
+
+  *prefix_idx = prefix_pool->values_used;
+  ++prefix_pool->values_used;
+  prefix_pool->bytes_used += bytes_needed;
+
+  return SVN_NO_ERROR;
+}
+
+/* Thread-safe wrapper around prefix_pool_get_internal. */
+static svn_error_t *
+prefix_pool_get(apr_uint32_t *prefix_idx,
+                prefix_pool_t *prefix_pool,
+                const char *prefix)
+{
+  SVN_MUTEX__WITH_LOCK(prefix_pool->mutex,
+                       prefix_pool_get_internal(prefix_idx, prefix_pool,
+                                                prefix));
+
+  return SVN_NO_ERROR;
+}
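
A hedged sketch of how the prefix pool above is meant to be driven; the
caller, the 64 kB budget and the register_prefix() helper are illustrative
and not part of this commit:

    /* Create a small prefix pool and look up one prefix in it.  NO_INDEX
     * signals capacity exhaustion; the caller then falls back to storing
     * the full key with every cache entry. */
    static svn_error_t *
    register_prefix(apr_uint32_t *prefix_idx,
                    const char *prefix,
                    apr_pool_t *result_pool)
    {
      prefix_pool_t *prefix_pool;

      SVN_ERR(prefix_pool_create(&prefix_pool, 0x10000 /* bytes_max */,
                                 TRUE /* mutex_required */, result_pool));
      SVN_ERR(prefix_pool_get(prefix_idx, prefix_pool, prefix));

      if (*prefix_idx == NO_INDEX)
        {
          /* Pool full: keep using the full-key code path. */
        }

      return SVN_NO_ERROR;
    }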
 
 /* Debugging / corruption detection support.
  * If you define this macro, the getter functions will perform expensive
@@ -198,6 +395,12 @@ typedef apr_uint64_t entry_key_t[2];
  */
 #ifdef SVN_DEBUG_CACHE_MEMBUFFER
 
+/* The prefix passed to svn_cache__create_membuffer_cache() effectively
+ * defines the type of all items stored by that cache instance. We'll take
+ * the last 15 bytes + \0 as plaintext for easy identification by the dev.
+ */
+#define PREFIX_TAIL_LEN 16
+
 /* This record will be attached to any cache entry. It tracks item data
  * (content), key and type as hash values and is the baseline against which
  * the getters will compare their results to detect inconsistencies.
@@ -233,22 +436,36 @@ typedef struct entry_tag_t
 /* Initialize all members of TAG except for the content hash.
  */
 static svn_error_t *store_key_part(entry_tag_t *tag,
-                                   entry_key_t prefix_hash,
-                                   char *prefix_tail,
+                                   const char *prefix,
                                    const void *key,
                                    apr_size_t key_len,
-                                   apr_pool_t *pool)
+                                   apr_pool_t *scratch_pool)
 {
   svn_checksum_t *checksum;
+  apr_size_t prefix_len = strlen(prefix);
+
+  if (prefix_len > sizeof(tag->prefix_tail))
+    {
+      prefix += prefix_len - (sizeof(tag->prefix_tail) - 1);
+      prefix_len = sizeof(tag->prefix_tail) - 1;
+    }
+
+  SVN_ERR(svn_checksum(&checksum,
+                       svn_checksum_md5,
+                       prefix,
+                       strlen(prefix),
+                       scratch_pool));
+  memcpy(tag->prefix_hash, checksum->digest, sizeof(tag->prefix_hash));
+
   SVN_ERR(svn_checksum(&checksum,
                        svn_checksum_md5,
                        key,
                        key_len,
-                       pool));
-
-  memcpy(tag->prefix_hash, prefix_hash, sizeof(tag->prefix_hash));
+                       scratch_pool));
   memcpy(tag->key_hash, checksum->digest, sizeof(tag->key_hash));
-  memcpy(tag->prefix_tail, prefix_tail, sizeof(tag->prefix_tail));
+
+  memset(tag->prefix_tail, 0, sizeof(tag->key_hash));
+  memcpy(tag->prefix_tail, prefix, prefix_len + 1);
 
   tag->key_len = key_len;
 
@@ -258,7 +475,7 @@ static svn_error_t *store_key_part(entry
 /* Initialize the content hash member of TAG.
  */
 static svn_error_t* store_content_part(entry_tag_t *tag,
-                                       const char *data,
+                                       const void *data,
                                        apr_size_t size,
                                        apr_pool_t *pool)
 {
@@ -305,8 +522,7 @@ static svn_error_t* assert_equal_tags(co
   entry_tag_t *tag = &_tag;                                      \
   if (key)                                                       \
     SVN_ERR(store_key_part(tag,                                  \
-                           cache->prefix,                        \
-                           cache->info_prefix,                   \
+                           get_prefix_key(cache),                \
                            key,                                  \
                            cache->key_len == APR_HASH_KEY_STRING \
                                ? strlen((const char *) key)      \
@@ -323,23 +539,6 @@ static svn_error_t* assert_equal_tags(co
 
 #endif /* SVN_DEBUG_CACHE_MEMBUFFER */
 
-/* Per svn_cache_t instance initialization helper.
- * Copy the last to up PREFIX_TAIL_LEN-1 chars from PREFIX to PREFIX_TAIL.
- * If the prefix has been structured by ':', only store the last element
- * (which will tell us the type).
- */
-static void get_prefix_tail(const char *prefix, char *prefix_tail)
-{
-  apr_size_t len = strlen(prefix);
-  apr_size_t to_copy = MIN(len, PREFIX_TAIL_LEN - 1);
-  const char *last_colon = strrchr(prefix, ':');
-  apr_size_t last_element_pos = last_colon ? 0 : last_colon - prefix + 1;
-
-  to_copy = MIN(to_copy, len - last_element_pos);
-  memset(prefix_tail, 0, PREFIX_TAIL_LEN);
-  memcpy(prefix_tail, prefix + len - to_copy, to_copy);
-}
-
 /* A single dictionary entry. Since all entries will be allocated once
  * during cache creation, those entries might be either used or unused.
  * An entry is used if and only if it is contained in the doubly-linked
@@ -360,7 +559,7 @@ typedef struct entry_t
    * above ensures that there will be no overflows.
    * Only valid for used entries.
    */
-  apr_uint32_t size;
+  apr_size_t size;
 
   /* Number of (read) hits for this entry. Will be reset upon write.
    * Only valid for used entries.
@@ -498,6 +697,12 @@ struct svn_membuffer_t
      and that all segments must / will report the same values here. */
   apr_uint32_t segment_count;
 
+  /* Collection of prefixes shared among all instances accessing the
+   * same membuffer cache backend.  If a prefix is contained in this
+   * pool then all cache instances using an equal prefix must actually
+   * use the one stored in this pool. */
+  prefix_pool_t *prefix_pool;
+
   /* The dictionary, GROUP_SIZE * (group_count + spare_group_count)
    * entries long.  Never NULL.
    */
@@ -779,8 +984,8 @@ initialize_group(svn_membuffer_t *cache,
   apr_uint32_t first_index =
       (group_index / GROUP_INIT_GRANULARITY) * GROUP_INIT_GRANULARITY;
   apr_uint32_t last_index = first_index + GROUP_INIT_GRANULARITY;
-  if (last_index > cache->group_count)
-    last_index = cache->group_count;
+  if (last_index > cache->group_count + cache->spare_group_count)
+    last_index = cache->group_count + cache->spare_group_count;
 
   for (i = first_index; i < last_index; ++i)
     {
@@ -1122,17 +1327,19 @@ insert_entry(svn_membuffer_t *cache, ent
  */
 static apr_uint32_t
 get_group_index(svn_membuffer_t **cache,
-                entry_key_t key)
+                const entry_key_t *key)
 {
   svn_membuffer_t *segment0 = *cache;
+  apr_uint64_t key0 = key->fingerprint[0];
+  apr_uint64_t key1 = key->fingerprint[1];
 
   /* select the cache segment to use. they have all the same group_count.
    * Since key may not be well-distributed, pre-fold it to a smaller but
    * "denser" ranger.  The modulus is a prime larger than the largest
    * counts. */
-  *cache = &segment0[(key[1] % APR_UINT64_C(2809637) + (key[0] / 37))
+  *cache = &segment0[(key1 % APR_UINT64_C(2809637) + (key0 / 37))
                      & (segment0->segment_count - 1)];
-  return (key[0] % APR_UINT64_C(5030895599)) % segment0->group_count;
+  return (key0 % APR_UINT64_C(5030895599)) % segment0->group_count;
 }
 
 /* Reduce the hit count of ENTRY and update the accumulated hit info
@@ -1153,6 +1360,18 @@ let_entry_age(svn_membuffer_t *cache, en
     }
 }
 
+/* Return whether the keys in LHS and RHS match.
+ */
+static svn_boolean_t
+entry_keys_match(const entry_key_t *lhs,
+                 const entry_key_t *rhs)
+{
+  return (lhs->fingerprint[0] == rhs->fingerprint[0])
+      && (lhs->fingerprint[1] == rhs->fingerprint[1])
+      && (lhs->prefix_idx == rhs->prefix_idx)
+      && (lhs->key_len == rhs->key_len);
+}
+
 /* Given the GROUP_INDEX that shall contain an entry with the hash key
  * TO_FIND, find that entry in the specified group.
  *
@@ -1164,11 +1383,15 @@ let_entry_age(svn_membuffer_t *cache, en
  * new content), an unused entry or a forcibly removed entry (if all
  * group entries are currently in use). The entries' hash value will be
  * initialized with TO_FIND.
+ *
+ * Note: This function requires the caller to appropriately lock the CACHE.
+ * For FIND_EMPTY==FALSE, a read lock is required, for FIND_EMPTY==TRUE,
+ * the write lock must have been acquired.
  */
 static entry_t *
 find_entry(svn_membuffer_t *cache,
            apr_uint32_t group_index,
-           const apr_uint64_t to_find[2],
+           const full_key_t *to_find,
            svn_boolean_t find_empty)
 {
   entry_group_t *group;
@@ -1189,8 +1412,7 @@ find_entry(svn_membuffer_t *cache,
           entry = &group->entries[0];
 
           /* initialize entry for the new key */
-          entry->key[0] = to_find[0];
-          entry->key[1] = to_find[1];
+          entry->key = to_find->entry_key;
         }
 
       return entry;
@@ -1201,14 +1423,29 @@ find_entry(svn_membuffer_t *cache,
   while (1)
     {
       for (i = 0; i < group->header.used; ++i)
-        if (   to_find[0] == group->entries[i].key[0]
-            && to_find[1] == group->entries[i].key[1])
+        if (entry_keys_match(&group->entries[i].key, &to_find->entry_key))
           {
-            /* found it
-             */
+            /* This is the only entry that _may_ contain the correct data. */
             entry = &group->entries[i];
+
+            /* If we want to preserve it, check that it is actually a match. */
             if (!find_empty)
-              return entry;
+              {
+                /* If the full key is fully defined in prefix_idx & mangled
+                 * key, we are done. */
+                if (!entry->key.key_len)
+                  return entry;
+
+                /* Compare the full key. */
+                if (memcmp(to_find->full_key.data,
+                           cache->data + entry->offset,
+                           entry->key.key_len) == 0)
+                  return entry;
+
+                /* Key conflict. The entry to find cannot be anywhere else.
+                 * Therefore, it is not cached. */
+                return NULL;
+              }
 
             /* need to empty that entry */
             drop_entry(cache, entry);
@@ -1218,6 +1455,8 @@ find_entry(svn_membuffer_t *cache,
               group = last_group_in_chain(cache,
                                           &cache->directory[group_index]);
 
+            /* No entry found (actually, none left to find). */
+            entry = NULL;
             break;
           }
 
@@ -1292,7 +1531,7 @@ find_entry(svn_membuffer_t *cache,
            */
           for (i = 0; i < GROUP_SIZE; ++i)
             if (entry != &to_shrink->entries[i])
-              let_entry_age(cache, entry);
+              let_entry_age(cache, &to_shrink->entries[i]);
 
           drop_entry(cache, entry);
         }
@@ -1300,8 +1539,7 @@ find_entry(svn_membuffer_t *cache,
       /* initialize entry for the new key
        */
       entry = &group->entries[group->header.used];
-      entry->key[0] = to_find[0];
-      entry->key[1] = to_find[1];
+      entry->key = to_find->entry_key;
     }
 
   return entry;
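
To make the new buffer layout concrete: when KEY_LEN is non-zero, the bytes
at ENTRY->OFFSET hold the full key and only the remainder of ENTRY->SIZE is
item data.  The helpers below are illustrative only (they do not exist in
this file) and merely restate the arithmetic used by find_entry() and the
getters:

    /* Layout per entry: [ full key (key_len bytes) | serialized item ]. */
    static const void *
    entry_item_data(const svn_membuffer_t *cache, const entry_t *entry)
    {
      return cache->data + entry->offset + entry->key.key_len;
    }

    static apr_size_t
    entry_item_size(const entry_t *entry)
    {
      return entry->size - entry->key.key_len;
    }

    /* A fingerprint match only yields a candidate; with a non-empty stored
     * key the full keys still have to be compared byte by byte. */
    static svn_boolean_t
    entry_matches_full_key(const svn_membuffer_t *cache,
                           const entry_t *entry,
                           const full_key_t *to_find)
    {
      return entry->key.key_len == 0
             || memcmp(to_find->full_key.data,
                       cache->data + entry->offset,
                       entry->key.key_len) == 0;
    }
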
@@ -1427,12 +1665,12 @@ ensure_data_insertable_l2(svn_membuffer_
 
       /* leave function as soon as the insertion window is large enough
        */
-      if (end >= to_fit_in->size + cache->l2.current_data)
+      if (end - cache->l2.current_data >= to_fit_in->size)
         return TRUE;
 
       /* Don't be too eager to cache data.  If a lot of data has been moved
        * around, the current item has probably a relatively low priority.
-       * We must also limit the effort spent here (if even in case of faulty
+       * We must also limit the effort spent here (even in case of faulty
        * heuristics).  Therefore, give up after some time.
        */
       if (moved_size > 4 * to_fit_in->size && moved_count > 7)
@@ -1552,7 +1790,7 @@ ensure_data_insertable_l1(svn_membuffer_
 
       /* leave function as soon as the insertion window is large enough
        */
-      if (end >= size + cache->l1.current_data)
+      if (end - cache->l1.current_data >= size)
         return TRUE;
 
       /* Enlarge the insertion window
@@ -1621,6 +1859,7 @@ svn_cache__membuffer_cache_create(svn_me
                                   apr_pool_t *pool)
 {
   svn_membuffer_t *c;
+  prefix_pool_t *prefix_pool;
 
   apr_uint32_t seg;
   apr_uint32_t group_count;
@@ -1630,6 +1869,12 @@ svn_cache__membuffer_cache_create(svn_me
   apr_uint64_t data_size;
   apr_uint64_t max_entry_size;
 
+  /* Allocate 1% of the cache capacity to the prefix string pool.
+   */
+  SVN_ERR(prefix_pool_create(&prefix_pool, total_size / 100, thread_safe,
+                             pool));
+  total_size -= total_size / 100;
+
   /* Limit the total size (only relevant if we can address > 4GB)
    */
 #if APR_SIZEOF_VOIDP > 4
@@ -1719,7 +1964,7 @@ svn_cache__membuffer_cache_create(svn_me
                  : data_size / 8;
 
   /* to keep the entries small, we use 32 bit indexes only
-   * -> we need to ensure that no more then 4G entries exist.
+   * -> we need to ensure that no more than 4G entries exist.
    *
    * Note, that this limit could only be exceeded in a very
    * theoretical setup with about 1EB of cache.
@@ -1740,14 +1985,18 @@ svn_cache__membuffer_cache_create(svn_me
       /* allocate buffers and initialize cache members
        */
       c[seg].segment_count = (apr_uint32_t)segment_count;
+      c[seg].prefix_pool = prefix_pool;
 
       c[seg].group_count = main_group_count;
       c[seg].spare_group_count = spare_group_count;
       c[seg].first_spare_group = NO_INDEX;
       c[seg].max_spare_used = 0;
 
-      c[seg].directory = apr_pcalloc(pool,
-                                     group_count * sizeof(entry_group_t));
+      /* Allocate but don't clear / zero the directory because it would add
+         significantly to the server start-up time if the caches are large.
+         Group initialization will take care of that instead. */
+      c[seg].directory = apr_palloc(pool,
+                                    group_count * sizeof(entry_group_t));
 
       /* Allocate and initialize directory entries as "not initialized",
          hence "unused" */
@@ -1883,7 +2132,7 @@ svn_cache__membuffer_clear(svn_membuffer
 static svn_error_t *
 entry_exists_internal(svn_membuffer_t *cache,
                       apr_uint32_t group_index,
-                      entry_key_t to_find,
+                      const full_key_t *to_find,
                       svn_boolean_t *found)
 {
   *found = find_entry(cache, group_index, to_find, FALSE) != NULL;
@@ -1896,7 +2145,7 @@ entry_exists_internal(svn_membuffer_t *c
 static svn_error_t *
 entry_exists(svn_membuffer_t *cache,
              apr_uint32_t group_index,
-             entry_key_t to_find,
+             const full_key_t *to_find,
              svn_boolean_t *found)
 {
   WITH_READ_LOCK(cache,
@@ -1929,9 +2178,9 @@ select_level(svn_membuffer_t *cache,
            && priority > SVN_CACHE__MEMBUFFER_DEFAULT_PRIORITY)
     {
       /* Large but important items go into L2. */
-      entry_t dummy_entry = { { 0 } };
+      entry_t dummy_entry = { { { 0 } } };
       dummy_entry.priority = priority;
-      dummy_entry.size = (apr_uint32_t) size;
+      dummy_entry.size = size;
 
       return ensure_data_insertable_l2(cache, &dummy_entry)
            ? &cache->l2
@@ -1942,9 +2191,9 @@ select_level(svn_membuffer_t *cache,
   return NULL;
 }
 
-/* Try to insert the serialized item given in BUFFER with SIZE into
- * the group GROUP_INDEX of CACHE and uniquely identify it by hash
- * value TO_FIND.
+/* Try to insert the serialized item given in BUFFER with ITEM_SIZE
+ * into the group GROUP_INDEX of CACHE and uniquely identify it by
+ * hash value TO_FIND.
  *
  * However, there is no guarantee that it will actually be put into
  * the cache. If there is already some data associated with TO_FIND,
@@ -1952,19 +2201,20 @@ select_level(svn_membuffer_t *cache,
  * be inserted.
  *
  * Note: This function requires the caller to serialization access.
- * Don't call it directly, call membuffer_cache_get_partial instead.
+ * Don't call it directly, call membuffer_cache_set instead.
  */
 static svn_error_t *
 membuffer_cache_set_internal(svn_membuffer_t *cache,
-                             entry_key_t to_find,
+                             const full_key_t *to_find,
                              apr_uint32_t group_index,
                              char *buffer,
-                             apr_size_t size,
+                             apr_size_t item_size,
                              apr_uint32_t priority,
                              DEBUG_CACHE_MEMBUFFER_TAG_ARG
                              apr_pool_t *scratch_pool)
 {
   cache_level_t *level;
+  apr_size_t size = item_size + to_find->entry_key.key_len;
 
   /* first, look for a previous entry for the given key */
   entry_t *entry = find_entry(cache, group_index, to_find, FALSE);
@@ -1978,20 +2228,24 @@ membuffer_cache_set_internal(svn_membuff
        * negative value.
        */
       cache->data_used += (apr_uint64_t)size - entry->size;
-      entry->size = (apr_uint32_t) size;
+      entry->size = size;
       entry->priority = priority;
 
 #ifdef SVN_DEBUG_CACHE_MEMBUFFER
 
       /* Remember original content, type and key (hashes)
        */
-      SVN_ERR(store_content_part(tag, buffer, size, scratch_pool));
+      SVN_ERR(store_content_part(tag, buffer, item_size, scratch_pool));
       memcpy(&entry->tag, tag, sizeof(*tag));
 
 #endif
 
-      if (size)
-        memcpy(cache->data + entry->offset, buffer, size);
+      if (entry->key.key_len)
+        memcpy(cache->data + entry->offset, to_find->full_key.data,
+               entry->key.key_len);
+      if (item_size)
+        memcpy(cache->data + entry->offset + entry->key.key_len, buffer,
+               item_size);
 
       cache->total_writes++;
       return SVN_NO_ERROR;
@@ -2007,7 +2261,7 @@ membuffer_cache_set_internal(svn_membuff
        * the serialized item's (future) position within data buffer.
        */
       entry = find_entry(cache, group_index, to_find, TRUE);
-      entry->size = (apr_uint32_t) size;
+      entry->size = size;
       entry->offset = level->current_data;
       entry->priority = priority;
 
@@ -2015,7 +2269,7 @@ membuffer_cache_set_internal(svn_membuff
 
       /* Remember original content, type and key (hashes)
        */
-      SVN_ERR(store_content_part(tag, buffer, size, scratch_pool));
+      SVN_ERR(store_content_part(tag, buffer, item_size, scratch_pool));
       memcpy(&entry->tag, tag, sizeof(*tag));
 
 #endif
@@ -2026,8 +2280,12 @@ membuffer_cache_set_internal(svn_membuff
 
       /* Copy the serialized item data into the cache.
        */
-      if (size)
-        memcpy(cache->data + entry->offset, buffer, size);
+      if (entry->key.key_len)
+        memcpy(cache->data + entry->offset, to_find->full_key.data,
+               entry->key.key_len);
+      if (item_size)
+        memcpy(cache->data + entry->offset + entry->key.key_len, buffer,
+               item_size);
 
       cache->total_writes++;
     }
@@ -2056,7 +2314,7 @@ membuffer_cache_set_internal(svn_membuff
  */
 static svn_error_t *
 membuffer_cache_set(svn_membuffer_t *cache,
-                    entry_key_t key,
+                    const full_key_t *key,
                     void *item,
                     svn_cache__serialize_func_t serializer,
                     apr_uint32_t priority,
@@ -2069,7 +2327,7 @@ membuffer_cache_set(svn_membuffer_t *cac
 
   /* find the entry group that will hold the key.
    */
-  group_index = get_group_index(&cache, key);
+  group_index = get_group_index(&cache, &key->entry_key);
 
   /* Serialize the data.
    */
@@ -2112,12 +2370,12 @@ increment_hit_counters(svn_membuffer_t *
  * be done in POOL.
  *
  * Note: This function requires the caller to serialize access.
- * Don't call it directly, call membuffer_cache_get_partial instead.
+ * Don't call it directly, call membuffer_cache_get instead.
  */
 static svn_error_t *
 membuffer_cache_get_internal(svn_membuffer_t *cache,
                              apr_uint32_t group_index,
-                             entry_key_t to_find,
+                             const full_key_t *to_find,
                              char **buffer,
                              apr_size_t *item_size,
                              DEBUG_CACHE_MEMBUFFER_TAG_ARG
@@ -2140,9 +2398,9 @@ membuffer_cache_get_internal(svn_membuff
       return SVN_NO_ERROR;
     }
 
-  size = ALIGN_VALUE(entry->size);
+  size = ALIGN_VALUE(entry->size) - entry->key.key_len;
   *buffer = ALIGN_POINTER(apr_palloc(result_pool, size + ITEM_ALIGNMENT-1));
-  memcpy(*buffer, (const char*)cache->data + entry->offset, size);
+  memcpy(*buffer, cache->data + entry->offset + entry->key.key_len, size);
 
 #ifdef SVN_DEBUG_CACHE_MEMBUFFER
 
@@ -2154,7 +2412,8 @@ membuffer_cache_get_internal(svn_membuff
 
   /* Compare original content, type and key (hashes)
    */
-  SVN_ERR(store_content_part(tag, *buffer, entry->size, result_pool));
+  SVN_ERR(store_content_part(tag, *buffer, entry->size - entry->key.key_len,
+                             result_pool));
   SVN_ERR(assert_equal_tags(&entry->tag, tag));
 
 #endif
@@ -2162,19 +2421,19 @@ membuffer_cache_get_internal(svn_membuff
   /* update hit statistics
    */
   increment_hit_counters(cache, entry);
-  *item_size = entry->size;
+  *item_size = entry->size - entry->key.key_len;
 
   return SVN_NO_ERROR;
 }
 
 /* Look for the *ITEM identified by KEY. If no item has been stored
  * for KEY, *ITEM will be NULL. Otherwise, the DESERIALIZER is called
- * re-construct the proper object from the serialized data.
+ * to re-construct the proper object from the serialized data.
  * Allocations will be done in POOL.
  */
 static svn_error_t *
 membuffer_cache_get(svn_membuffer_t *cache,
-                    entry_key_t key,
+                    const full_key_t *key,
                     void **item,
                     svn_cache__deserialize_func_t deserializer,
                     DEBUG_CACHE_MEMBUFFER_TAG_ARG
@@ -2186,7 +2445,7 @@ membuffer_cache_get(svn_membuffer_t *cac
 
   /* find the entry group that will hold the key.
    */
-  group_index = get_group_index(&cache, key);
+  group_index = get_group_index(&cache, &key->entry_key);
   WITH_READ_LOCK(cache,
                  membuffer_cache_get_internal(cache,
                                               group_index,
@@ -2214,7 +2473,7 @@ membuffer_cache_get(svn_membuffer_t *cac
 static svn_error_t *
 membuffer_cache_has_key_internal(svn_membuffer_t *cache,
                                  apr_uint32_t group_index,
-                                 entry_key_t to_find,
+                                 const full_key_t *to_find,
                                  svn_boolean_t *found)
 {
   entry_t *entry = find_entry(cache, group_index, to_find, FALSE);
@@ -2243,12 +2502,12 @@ membuffer_cache_has_key_internal(svn_mem
  */
 static svn_error_t *
 membuffer_cache_has_key(svn_membuffer_t *cache,
-                        entry_key_t key,
+                        const full_key_t *key,
                         svn_boolean_t *found)
 {
   /* find the entry group that will hold the key.
    */
-  apr_uint32_t group_index = get_group_index(&cache, key);
+  apr_uint32_t group_index = get_group_index(&cache, &key->entry_key);
   cache->total_reads++;
 
   WITH_READ_LOCK(cache,
@@ -2274,7 +2533,7 @@ membuffer_cache_has_key(svn_membuffer_t
 static svn_error_t *
 membuffer_cache_get_partial_internal(svn_membuffer_t *cache,
                                      apr_uint32_t group_index,
-                                     entry_key_t to_find,
+                                     const full_key_t *to_find,
                                      void **item,
                                      svn_boolean_t *found,
                                      svn_cache__partial_getter_func_t deserializer,
@@ -2293,6 +2552,8 @@ membuffer_cache_get_partial_internal(svn
     }
   else
     {
+      const void *item_data = cache->data + entry->offset + entry->key.key_len;
+      apr_size_t item_size = entry->size - entry->key.key_len;
       *found = TRUE;
       increment_hit_counters(cache, entry);
 
@@ -2306,19 +2567,12 @@ membuffer_cache_get_partial_internal(svn
 
       /* Compare original content, type and key (hashes)
        */
-      SVN_ERR(store_content_part(tag,
-                                 (const char*)cache->data + entry->offset,
-                                 entry->size,
-                                 result_pool));
+      SVN_ERR(store_content_part(tag, item_data, item_size, result_pool));
       SVN_ERR(assert_equal_tags(&entry->tag, tag));
 
 #endif
 
-      return deserializer(item,
-                          (const char*)cache->data + entry->offset,
-                          entry->size,
-                          baton,
-                          result_pool);
+      return deserializer(item, item_data, item_size, baton, result_pool);
     }
 }
 
@@ -2330,7 +2584,7 @@ membuffer_cache_get_partial_internal(svn
  */
 static svn_error_t *
 membuffer_cache_get_partial(svn_membuffer_t *cache,
-                            entry_key_t key,
+                            const full_key_t *key,
                             void **item,
                             svn_boolean_t *found,
                             svn_cache__partial_getter_func_t deserializer,
@@ -2338,7 +2592,7 @@ membuffer_cache_get_partial(svn_membuffe
                             DEBUG_CACHE_MEMBUFFER_TAG_ARG
                             apr_pool_t *result_pool)
 {
-  apr_uint32_t group_index = get_group_index(&cache, key);
+  apr_uint32_t group_index = get_group_index(&cache, &key->entry_key);
 
   WITH_READ_LOCK(cache,
                  membuffer_cache_get_partial_internal
@@ -2362,7 +2616,7 @@ membuffer_cache_get_partial(svn_membuffe
 static svn_error_t *
 membuffer_cache_set_partial_internal(svn_membuffer_t *cache,
                                      apr_uint32_t group_index,
-                                     entry_key_t to_find,
+                                     const full_key_t *to_find,
                                      svn_cache__partial_setter_func_t func,
                                      void *baton,
                                      DEBUG_CACHE_MEMBUFFER_TAG_ARG
@@ -2380,9 +2634,10 @@ membuffer_cache_set_partial_internal(svn
       svn_error_t *err;
 
       /* access the serialized cache item */
-      char *data = (char*)cache->data + entry->offset;
-      char *orig_data = data;
-      apr_size_t size = entry->size;
+      apr_size_t key_len = entry->key.key_len;
+      void *item_data = cache->data + entry->offset + key_len;
+      void *orig_data = item_data;
+      apr_size_t item_size = entry->size - key_len;
 
       increment_hit_counters(cache, entry);
       cache->total_writes++;
@@ -2392,19 +2647,19 @@ membuffer_cache_set_partial_internal(svn
       /* Check for overlapping entries.
        */
       SVN_ERR_ASSERT(entry->next == NO_INDEX ||
-                     entry->offset + size
+                     entry->offset + entry->size
                         <= get_entry(cache, entry->next)->offset);
 
       /* Compare original content, type and key (hashes)
        */
-      SVN_ERR(store_content_part(tag, data, size, scratch_pool));
+      SVN_ERR(store_content_part(tag, item_data, item_size, scratch_pool));
       SVN_ERR(assert_equal_tags(&entry->tag, tag));
 
 #endif
 
       /* modify it, preferably in-situ.
        */
-      err = func((void **)&data, &size, baton, scratch_pool);
+      err = func(&item_data, &item_size, baton, scratch_pool);
 
       if (err)
         {
@@ -2421,21 +2676,28 @@ membuffer_cache_set_partial_internal(svn
           /* if the modification caused a re-allocation, we need to remove
            * the old entry and to copy the new data back into cache.
            */
-          if (data != orig_data)
+          if (item_data != orig_data)
             {
               /* Remove the old entry and try to make space for the new one.
+               * Note that the key has already been stored in the past, i.e.
+               * it is shorter than the MAX_ENTRY_SIZE.
                */
               drop_entry(cache, entry);
-              if (   (cache->max_entry_size >= size)
-                  && ensure_data_insertable_l1(cache, size))
+              if (   (cache->max_entry_size - key_len >= item_size)
+                  && ensure_data_insertable_l1(cache, item_size + key_len))
                 {
                   /* Write the new entry.
                    */
                   entry = find_entry(cache, group_index, to_find, TRUE);
-                  entry->size = (apr_uint32_t) size;
+                  entry->size = item_size + key_len;
                   entry->offset = cache->l1.current_data;
-                  if (size)
-                    memcpy(cache->data + entry->offset, data, size);
+
+                  if (key_len)
+                    memcpy(cache->data + entry->offset,
+                           to_find->full_key.data, key_len);
+                  if (item_size)
+                    memcpy(cache->data + entry->offset + key_len, item_data,
+                           item_size);
 
                   /* Link the entry properly.
                    */
@@ -2447,7 +2709,7 @@ membuffer_cache_set_partial_internal(svn
 
           /* Remember original content, type and key (hashes)
            */
-          SVN_ERR(store_content_part(tag, data, size, scratch_pool));
+          SVN_ERR(store_content_part(tag, item_data, item_size, scratch_pool));
           memcpy(&entry->tag, tag, sizeof(*tag));
 
 #endif
@@ -2464,7 +2726,7 @@ membuffer_cache_set_partial_internal(svn
  */
 static svn_error_t *
 membuffer_cache_set_partial(svn_membuffer_t *cache,
-                            entry_key_t key,
+                            const full_key_t *key,
                             svn_cache__partial_setter_func_t func,
                             void *baton,
                             DEBUG_CACHE_MEMBUFFER_TAG_ARG
@@ -2472,7 +2734,7 @@ membuffer_cache_set_partial(svn_membuffe
 {
   /* cache item lookup
    */
-  apr_uint32_t group_index = get_group_index(&cache, key);
+  apr_uint32_t group_index = get_group_index(&cache, &key->entry_key);
   WITH_WRITE_LOCK(cache,
                   membuffer_cache_set_partial_internal
                      (cache, group_index, key, func, baton,
@@ -2498,22 +2760,6 @@ membuffer_cache_set_partial(svn_membuffe
  * svn_cache__t instance.
  */
 
-/* Stores the combined key value for the given key.  It will be used by
- * combine_key() to short-circuit expensive hash calculations.
- */
-typedef struct last_access_key_t
-{
-  /* result of key combining */
-  entry_key_t combined_key;
-
-  /* length of the key (or APR_HASH_KEY_STRING if not used) */
-  apr_ssize_t key_len;
-
-  /* the original key.  Only KEY_LEN bytes are valid.  We use uint32 for
-   * better compatibility with pseudo-md5 functions. */
-  apr_uint32_t key[64];
-} last_access_key_t;
-
 /* Internal cache structure (used in svn_cache__t.cache_internal) basically
  * holding the additional parameters needed to call the respective membuffer
  * functions.
@@ -2532,17 +2778,12 @@ typedef struct svn_membuffer_cache_t
    */
   svn_cache__deserialize_func_t deserializer;
 
-  /* Prepend this byte sequence to any key passed to us.
-   * This makes (very likely) our keys different from all keys used
-   * by other svn_membuffer_cache_t instances.
+  /* Prepend this to any key passed to us.
+   * This makes our keys different from all keys used by svn_membuffer_cache_t
+   * instances that we don't want to share cached data with.
    */
   entry_key_t prefix;
 
-  /* The tail of the prefix string. It is being used as a developer-visible
-   * ID for this cache instance.
-   */
-  char info_prefix[PREFIX_TAIL_LEN];
-
   /* length of the keys that will be passed to us through the
    * svn_cache_t interface. May be APR_HASH_KEY_STRING.
    */
@@ -2553,22 +2794,21 @@ typedef struct svn_membuffer_cache_t
 
   /* Temporary buffer containing the hash key for the current access
    */
-  entry_key_t combined_key;
-
-  /* cache for the last key used.
-   * Will be NULL for caches with short fix-sized keys.
-   */
-  last_access_key_t *last_access;
+  full_key_t combined_key;
 
   /* if enabled, this will serialize the access to this instance.
    */
   svn_mutex__t *mutex;
 } svn_membuffer_cache_t;
 
-/* After an estimated ALLOCATIONS_PER_POOL_CLEAR allocations, we should
- * clear the svn_membuffer_cache_t.pool to keep memory consumption in check.
- */
-#define ALLOCATIONS_PER_POOL_CLEAR 10
+/* Return the prefix key used by CACHE. */
+static const char *
+get_prefix_key(const svn_membuffer_cache_t *cache)
+{
+  return (cache->prefix.prefix_idx == NO_INDEX
+       ? cache->combined_key.full_key.data
+       : cache->membuffer->prefix_pool->values[cache->prefix.prefix_idx]);
+}
 
 /* Basically calculate a hash value for KEY of length KEY_LEN, combine it
  * with the CACHE->PREFIX and write the result in CACHE->COMBINED_KEY.
@@ -2580,70 +2820,35 @@ combine_long_key(svn_membuffer_cache_t *
                  const void *key,
                  apr_ssize_t key_len)
 {
-  assert(cache->last_access);
+  apr_uint32_t *digest_buffer;
+  char *key_copy;
+  apr_size_t prefix_len = cache->prefix.key_len;
+  apr_size_t aligned_key_len;
 
   /* handle variable-length keys */
   if (key_len == APR_HASH_KEY_STRING)
     key_len = strlen((const char *) key);
 
-  /* same key as the last time? -> short-circuit */
-  if (   key_len == cache->last_access->key_len
-      && memcmp(key, cache->last_access->key, key_len) == 0)
-    {
-      memcpy(cache->combined_key, cache->last_access->combined_key,
-             sizeof(cache->combined_key));
-    }
-  else if (key_len >= 64)
-    {
-      /* relatively long key.  Use the generic, slow hash code for it */
-      apr_md5((unsigned char*)cache->combined_key, key, key_len);
-      cache->combined_key[0] ^= cache->prefix[0];
-      cache->combined_key[1] ^= cache->prefix[1];
-
-      /* is the key short enough to cache the result? */
-      if (key_len <= sizeof(cache->last_access->key))
-        {
-          memcpy(cache->last_access->combined_key, cache->combined_key,
-                 sizeof(cache->combined_key));
-          cache->last_access->key_len = key_len;
-          memcpy(cache->last_access->key, key, key_len);
-        }
-    }
-  else
-    {
-      /* shorter keys use efficient hash code and *do* cache the results */
-      cache->last_access->key_len = key_len;
-      if (key_len < 16)
-        {
-          memset(cache->last_access->key, 0, 16);
-          memcpy(cache->last_access->key, key, key_len);
-
-          svn__pseudo_md5_15((apr_uint32_t *)cache->combined_key,
-                             cache->last_access->key);
-        }
-      else if (key_len < 32)
-        {
-          memset(cache->last_access->key, 0, 32);
-          memcpy(cache->last_access->key, key, key_len);
-
-          svn__pseudo_md5_31((apr_uint32_t *)cache->combined_key,
-                             cache->last_access->key);
-        }
-      else
-        {
-          memset(cache->last_access->key, 0, 64);
-          memcpy(cache->last_access->key, key, key_len);
-
-          svn__pseudo_md5_63((apr_uint32_t *)cache->combined_key,
-                             cache->last_access->key);
-        }
-
-      cache->combined_key[0] ^= cache->prefix[0];
-      cache->combined_key[1] ^= cache->prefix[1];
+  aligned_key_len = ALIGN_VALUE(key_len);
 
-      memcpy(cache->last_access->combined_key, cache->combined_key,
-             sizeof(cache->combined_key));
-    }
+  /* Combine keys. */
+  svn_membuf__ensure(&cache->combined_key.full_key,
+                     aligned_key_len + prefix_len);
+
+  key_copy = (char *)cache->combined_key.full_key.data + prefix_len;
+  cache->combined_key.entry_key.key_len = aligned_key_len + prefix_len;
+  memcpy(key_copy, key, key_len);
+  memset(key_copy + key_len, 0, aligned_key_len - key_len);
+
+  /* Hash key into 16 bytes. */
+  digest_buffer = (apr_uint32_t *)cache->combined_key.entry_key.fingerprint;
+  svn__fnv1a_32x4_raw(digest_buffer, key, key_len);
+
+  /* Combine with prefix. */
+  cache->combined_key.entry_key.fingerprint[0]
+    ^= cache->prefix.fingerprint[0];
+  cache->combined_key.entry_key.fingerprint[1]
+    ^= cache->prefix.fingerprint[1];
 }
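
For readers unfamiliar with the hash family now used for the fingerprints:
svn__fnv1a_32x4_raw() is an interleaved variant that emits four 32-bit words
at once; the plain 32-bit FNV-1a it derives from looks like this (textbook
constants, shown here only for illustration):

    static apr_uint32_t
    fnv1a_32(const void *input, apr_size_t len)
    {
      const unsigned char *p = input;
      apr_uint32_t hash = 0x811c9dc5;    /* FNV-1a offset basis */
      apr_size_t i;

      for (i = 0; i < len; ++i)
        {
          hash ^= p[i];                  /* xor in the next octet ... */
          hash *= 0x01000193;            /* ... then multiply by the prime */
        }

      return hash;
    }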
 
 /* Basically calculate a hash value for KEY of length KEY_LEN, combine it
@@ -2657,39 +2862,52 @@ combine_key(svn_membuffer_cache_t *cache
   /* copy of *key, padded with 0 */
   apr_uint64_t data[2];
 
+  /* Do we have to compare full keys? */
+  if (cache->prefix.prefix_idx == NO_INDEX)
+    {
+      combine_long_key(cache, key, key_len);
+      return;
+    }
+
   /* short, fixed-size keys are the most common case */
   if (key_len == 16)
     {
-      data[0] = ((const apr_uint64_t *)key)[0];
-      data[1] = ((const apr_uint64_t *)key)[1];
+      memcpy(data, key, 16);
     }
   else if (key_len == 8)
     {
-      data[0] = ((const apr_uint64_t *)key)[0];
+      memcpy(data, key, 8);
       data[1] = 0;
     }
-  else if (key_len != APR_HASH_KEY_STRING && key_len < 16)
+  else
     {
+      assert(key_len != APR_HASH_KEY_STRING && key_len < 16);
       data[0] = 0;
       data[1] = 0;
       memcpy(data, key, key_len);
     }
-  else
-    {
-      /* longer or variably sized keys */
-      combine_long_key(cache, key, key_len);
-      return;
-    }
 
-  /* scramble key DATA.  All of this must be reversible to prevent key
-   * collisions.  So, we limit ourselves to xor and permutations. */
+  /* Scramble key DATA to spread the key space more evenly across the
+   * cache segments and entry buckets.  All of this shall be reversible
+   * to prevent key collisions.  So, we limit ourselves to xor and
+   * permutations.
+   *
+   * Since the entry key must preserve the full key (prefix and KEY),
+   * the scramble must not introduce KEY collisions.
+   */
   data[1] = (data[1] << 27) | (data[1] >> 37);
   data[1] ^= data[0] & 0xffff;
   data[0] ^= data[1] & APR_UINT64_C(0xffffffffffff0000);
 
-  /* combine with this cache's namespace */
-  cache->combined_key[0] = data[0] ^ cache->prefix[0];
-  cache->combined_key[1] = data[1] ^ cache->prefix[1];
+  /* Combine with this cache's prefix.  This is reversible because the
+   * prefix is known through the respective entry_key element.  So,
+   * knowing entry_key.prefix_idx, we can still reconstruct KEY (and the
+   * prefix key).
+   */
+  cache->combined_key.entry_key.fingerprint[0]
+    = data[0] ^ cache->prefix.fingerprint[0];
+  cache->combined_key.entry_key.fingerprint[1]
+    = data[1] ^ cache->prefix.fingerprint[1];
 }
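
The reversibility claimed above can be checked directly.  A standalone
sketch (assumes <assert.h> and APR's integer types; unscramble_key and the
round-trip check are illustrative, nothing in the cache ever needs to invert
the mapping at run time):

    /* Forward transform, exactly as in combine_key(). */
    static void
    scramble_key(apr_uint64_t data[2])
    {
      data[1] = (data[1] << 27) | (data[1] >> 37);
      data[1] ^= data[0] & 0xffff;
      data[0] ^= data[1] & APR_UINT64_C(0xffffffffffff0000);
    }

    /* Inverse transform: undo the steps in reverse order.  Its existence
     * is what guarantees that distinct keys stay distinct. */
    static void
    unscramble_key(apr_uint64_t data[2])
    {
      data[0] ^= data[1] & APR_UINT64_C(0xffffffffffff0000);
      data[1] ^= data[0] & 0xffff;
      data[1] = (data[1] >> 27) | (data[1] << 37);
    }

    static void
    check_scramble_roundtrip(void)
    {
      apr_uint64_t data[2] = { APR_UINT64_C(0x0123456789abcdef),
                               APR_UINT64_C(0xfedcba9876543210) };

      scramble_key(data);
      unscramble_key(data);
      assert(data[0] == APR_UINT64_C(0x0123456789abcdef)
             && data[1] == APR_UINT64_C(0xfedcba9876543210));
    }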
 
 /* Implement svn_cache__vtable_t.get (not thread-safe)
@@ -2721,7 +2939,7 @@ svn_membuffer_cache_get(void **value_p,
 
   /* Look the item up. */
   SVN_ERR(membuffer_cache_get(cache->membuffer,
-                              cache->combined_key,
+                              &cache->combined_key,
                               value_p,
                               cache->deserializer,
                               DEBUG_CACHE_MEMBUFFER_TAG
@@ -2758,7 +2976,7 @@ svn_membuffer_cache_has_key(svn_boolean_
 
   /* Look the item up. */
   SVN_ERR(membuffer_cache_has_key(cache->membuffer,
-                                  cache->combined_key,
+                                  &cache->combined_key,
                                   found));
 
   /* return result */
@@ -2790,7 +3008,7 @@ svn_membuffer_cache_set(void *cache_void
    * that the item will actually be cached afterwards.
    */
   return membuffer_cache_set(cache->membuffer,
-                             cache->combined_key,
+                             &cache->combined_key,
                              value,
                              cache->serializer,
                              cache->priority,
@@ -2836,7 +3054,7 @@ svn_membuffer_cache_get_partial(void **v
 
   combine_key(cache, key, cache->key_len);
   SVN_ERR(membuffer_cache_get_partial(cache->membuffer,
-                                      cache->combined_key,
+                                      &cache->combined_key,
                                       value_p,
                                       found,
                                       func,
@@ -2864,7 +3082,7 @@ svn_membuffer_cache_set_partial(void *ca
     {
       combine_key(cache, key, cache->key_len);
       SVN_ERR(membuffer_cache_set_partial(cache->membuffer,
-                                          cache->combined_key,
+                                          &cache->combined_key,
                                           func,
                                           baton,
                                           DEBUG_CACHE_MEMBUFFER_TAG
@@ -2936,7 +3154,7 @@ svn_membuffer_cache_get_info(void *cache
 
   /* cache front-end specific data */
 
-  info->id = apr_pstrdup(result_pool, cache->info_prefix);
+  info->id = apr_pstrdup(result_pool, get_prefix_key(cache));
 
   /* collect info from shared cache back-end */
 
@@ -3125,15 +3343,17 @@ svn_cache__create_membuffer_cache(svn_ca
                                   const char *prefix,
                                   apr_uint32_t priority,
                                   svn_boolean_t thread_safe,
+                                  svn_boolean_t short_lived,
                                   apr_pool_t *result_pool,
                                   apr_pool_t *scratch_pool)
 {
   svn_checksum_t *checksum;
+  apr_size_t prefix_len, prefix_orig_len;
 
   /* allocate the cache header structures
    */
   svn_cache__t *wrapper = apr_pcalloc(result_pool, sizeof(*wrapper));
-  svn_membuffer_cache_t *cache = apr_palloc(result_pool, sizeof(*cache));
+  svn_membuffer_cache_t *cache = apr_pcalloc(result_pool, sizeof(*cache));
 
   /* initialize our internal cache header
    */
@@ -3144,32 +3364,64 @@ svn_cache__create_membuffer_cache(svn_ca
   cache->deserializer = deserializer
                       ? deserializer
                       : deserialize_svn_stringbuf;
-  get_prefix_tail(prefix, cache->info_prefix);
   cache->priority = priority;
   cache->key_len = klen;
 
   SVN_ERR(svn_mutex__init(&cache->mutex, thread_safe, result_pool));
 
-  /* for performance reasons, we don't actually store the full prefix but a
-   * hash value of it
-   */
+  /* Copy the prefix into the prefix full key. Align it to ITEM_ALIGNMENT.
+   * Don't forget to include the terminating NUL. */
+  prefix_orig_len = strlen(prefix) + 1;
+  prefix_len = ALIGN_VALUE(prefix_orig_len);
+
+  /* Paranoia check to ensure pointer arithmetic works as expected. */
+  if (prefix_orig_len >= SVN_MAX_OBJECT_SIZE)
+    return svn_error_create(SVN_ERR_INCORRECT_PARAMS, NULL,
+                            _("Prefix too long"));
+
+  /* Construct the folded prefix key. */
   SVN_ERR(svn_checksum(&checksum,
                        svn_checksum_md5,
                        prefix,
                        strlen(prefix),
                        scratch_pool));
-  memcpy(cache->prefix, checksum->digest, sizeof(cache->prefix));
+  memcpy(cache->prefix.fingerprint, checksum->digest,
+         sizeof(cache->prefix.fingerprint));
+  cache->prefix.key_len = prefix_len;
+
+  /* Fixed-length keys of up to 16 bytes may be handled without storing the
+   * full key separately for each item. */
+  if (   (klen != APR_HASH_KEY_STRING)
+      && (klen <= sizeof(cache->combined_key.entry_key.fingerprint))
+      && !short_lived)
+    SVN_ERR(prefix_pool_get(&cache->prefix.prefix_idx,
+                            membuffer->prefix_pool,
+                            prefix));
+  else
+    cache->prefix.prefix_idx = NO_INDEX;
 
-  /* fix-length keys of 16 bytes or under don't need a buffer because we
-   * can use a very fast key combining algorithm. */
-  if ((klen == APR_HASH_KEY_STRING) ||  klen > sizeof(entry_key_t))
-    {
-      cache->last_access = apr_pcalloc(result_pool, sizeof(*cache->last_access));
-      cache->last_access->key_len = APR_HASH_KEY_STRING;
+  /* If key combining is not guaranteed to produce unique results, we have
+   * to handle full keys.  Otherwise, leave the full key NULL. */
+  if (cache->prefix.prefix_idx == NO_INDEX)
+    {
+      /* Initialize the combined key. Pre-allocate some extra room in the
+       * full key such that we probably don't need to re-alloc. */
+      cache->combined_key.entry_key = cache->prefix;
+      svn_membuf__create(&cache->combined_key.full_key, prefix_len + 200,
+                         result_pool);
+      memcpy((char *)cache->combined_key.full_key.data, prefix,
+             prefix_orig_len);
+      memset((char *)cache->combined_key.full_key.data + prefix_orig_len, 0,
+             prefix_len - prefix_orig_len);
     }
   else
     {
-      cache->last_access = NULL;
+      /* Initialize the combined key.  We will never have the full combined
+       * key, so leave it NULL and set its length to 0 to prevent access to
+       * it.  Keep the fingerprint 0 as well, because it will always be
+       * set anew by combine_key(). */
+      cache->combined_key.entry_key.prefix_idx = cache->prefix.prefix_idx;
+      cache->combined_key.entry_key.key_len = 0;
     }
 
   /* initialize the generic cache wrapper

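As an illustrative aside on the prefix handling added above: the prefix is copied (NUL included) into a buffer whose length is rounded up to the cache's item alignment, with the tail zero-filled so later key comparisons can work on whole aligned blocks. Below is a minimal standalone sketch of that padding step; the 8-byte ALIGNMENT and the ALIGN_VALUE macro here are assumed stand-ins for the cache's internal macros, not the actual definitions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-ins for the cache's alignment macros. */
#define ALIGNMENT 8
#define ALIGN_VALUE(v) (((v) + ALIGNMENT - 1) & ~(size_t)(ALIGNMENT - 1))

/* Copy PREFIX (including its terminating NUL) into a freshly allocated
 * buffer whose size is rounded up to ALIGNMENT, zero-filling the padding.
 * Return the buffer and store the padded size in *LEN_P. */
static char *
pad_prefix(const char *prefix, size_t *len_p)
{
  size_t orig_len = strlen(prefix) + 1;      /* include the NUL */
  size_t padded_len = ALIGN_VALUE(orig_len);
  char *buf = malloc(padded_len);

  if (buf == NULL)
    return NULL;

  memcpy(buf, prefix, orig_len);
  memset(buf + orig_len, 0, padded_len - orig_len);

  *len_p = padded_len;
  return buf;
}

int main(void)
{
  size_t len;
  char *key = pad_prefix("svn.fs.id", &len);   /* 10 bytes incl. NUL -> 16 */

  printf("padded length: %u\n", (unsigned)len);
  free(key);
  return 0;
}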
Modified: subversion/branches/ra-git/subversion/libsvn_subr/checksum.c
URL: http://svn.apache.org/viewvc/subversion/branches/ra-git/subversion/libsvn_subr/checksum.c?rev=1717223&r1=1717222&r2=1717223&view=diff
==============================================================================
--- subversion/branches/ra-git/subversion/libsvn_subr/checksum.c (original)
+++ subversion/branches/ra-git/subversion/libsvn_subr/checksum.c Mon Nov 30 10:24:16 2015
@@ -80,6 +80,14 @@ static const apr_size_t digest_sizes[] =
   sizeof(apr_uint32_t)
 };
 
+/* Checksum type prefixes used in serialized checksums. */
+static const char *ckind_str[] = {
+  "$md5 $",
+  "$sha1$",
+  "$fnv1$",
+  "$fnvm$",
+};
+
 /* Returns the digest size of its argument. */
 #define DIGESTSIZE(k) \
   (((k) < svn_checksum_md5 || (k) > svn_checksum_fnv1a_32x4) ? 0 : digest_sizes[k])
@@ -317,13 +325,10 @@ svn_checksum_serialize(const svn_checksu
                        apr_pool_t *result_pool,
                        apr_pool_t *scratch_pool)
 {
-  const char *ckind_str;
-
   SVN_ERR_ASSERT_NO_RETURN(checksum->kind >= svn_checksum_md5
                            && checksum->kind <= svn_checksum_fnv1a_32x4);
-  ckind_str = (checksum->kind == svn_checksum_md5 ? "$md5 $" : "$sha1$");
   return apr_pstrcat(result_pool,
-                     ckind_str,
+                     ckind_str[checksum->kind],
                      svn_checksum_to_cstring(checksum, scratch_pool),
                      SVN_VA_NULL);
 }
@@ -335,18 +340,29 @@ svn_checksum_deserialize(const svn_check
                          apr_pool_t *result_pool,
                          apr_pool_t *scratch_pool)
 {
-  svn_checksum_kind_t ckind;
+  svn_checksum_kind_t kind;
   svn_checksum_t *parsed_checksum;
 
-  /* "$md5 $..." or "$sha1$..." */
-  SVN_ERR_ASSERT(strlen(data) > 6);
+  /* All prefixes have the same length. */
+  apr_size_t prefix_len = strlen(ckind_str[0]);
 
-  ckind = (data[1] == 'm' ? svn_checksum_md5 : svn_checksum_sha1);
-  SVN_ERR(svn_checksum_parse_hex(&parsed_checksum, ckind,
-                                 data + 6, result_pool));
-  *checksum = parsed_checksum;
+  /* "$md5 $...", "$sha1$..." or ... */
+  if (strlen(data) <= prefix_len)
+    return svn_error_createf(SVN_ERR_BAD_CHECKSUM_PARSE, NULL,
+                             _("Invalid prefix in checksum '%s'"),
+                             data);
+
+  for (kind = svn_checksum_md5; kind <= svn_checksum_fnv1a_32x4; ++kind)
+    if (strncmp(ckind_str[kind], data, prefix_len) == 0)
+      {
+        SVN_ERR(svn_checksum_parse_hex(&parsed_checksum, kind,
+                                       data + prefix_len, result_pool));
+        *checksum = parsed_checksum;
+        return SVN_NO_ERROR;
+      }
 
-  return SVN_NO_ERROR;
+  return svn_error_createf(SVN_ERR_BAD_CHECKSUM_KIND, NULL,
+                           "Unknown checksum kind in '%s'", data);
 }
 
 
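To make the table-driven prefix dispatch above easier to follow, here is a small self-contained sketch of the same lookup idea. The enum values and names are illustrative stand-ins, not the actual svn_checksum_kind_t constants.

#include <stdio.h>
#include <string.h>

/* Illustrative kind indexes, in the same order as the prefix table. */
enum { KIND_MD5, KIND_SHA1, KIND_FNV1, KIND_FNV1_MOD, KIND_COUNT };

static const char *kind_prefix[KIND_COUNT] = {
  "$md5 $",
  "$sha1$",
  "$fnv1$",
  "$fnvm$",
};

/* Return the kind whose prefix starts DATA, or -1 if none matches.
 * All prefixes have the same length, which keeps the loop trivial. */
static int
parse_kind(const char *data)
{
  size_t prefix_len = strlen(kind_prefix[0]);
  int kind;

  if (strlen(data) <= prefix_len)
    return -1;

  for (kind = 0; kind < KIND_COUNT; ++kind)
    if (strncmp(kind_prefix[kind], data, prefix_len) == 0)
      return kind;

  return -1;
}

int main(void)
{
  printf("%d\n", parse_kind("$sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709"));
  printf("%d\n", parse_kind("$bogus$00"));
  return 0;
}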
@@ -357,26 +373,42 @@ svn_checksum_parse_hex(svn_checksum_t **
                        apr_pool_t *pool)
 {
   apr_size_t i, len;
-  char is_nonzero = '\0';
-  char *digest;
-  static const char xdigitval[256] =
+  unsigned char is_nonzero = 0;
+  unsigned char *digest;
+  static const unsigned char xdigitval[256] =
     {
-      -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-      -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-      -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-       0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1,-1,-1,-1,-1,   /* 0-9 */
-      -1,10,11,12,13,14,15,-1,-1,-1,-1,-1,-1,-1,-1,-1,   /* A-F */
-      -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-      -1,10,11,12,13,14,15,-1,-1,-1,-1,-1,-1,-1,-1,-1,   /* a-f */
-      -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-      -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-      -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-      -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-      -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-      -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-      -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-      -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-      -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,   /* 0-7 */
+      0x08,0x09,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,   /* 8-9 */
+      0xFF,0x0A,0x0B,0x0C,0x0D,0x0E,0x0F,0xFF,   /* A-F */
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0x0A,0x0B,0x0C,0x0D,0x0E,0x0F,0xFF,   /* a-f */
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+      0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
     };
 
   if (hex == NULL)
@@ -388,18 +420,18 @@ svn_checksum_parse_hex(svn_checksum_t **
   SVN_ERR(validate_kind(kind));
 
   *checksum = svn_checksum_create(kind, pool);
-  digest = (char *)(*checksum)->digest;
+  digest = (unsigned char *)(*checksum)->digest;
   len = DIGESTSIZE(kind);
 
   for (i = 0; i < len; i++)
     {
-      char x1 = xdigitval[(unsigned char)hex[i * 2]];
-      char x2 = xdigitval[(unsigned char)hex[i * 2 + 1]];
-      if (x1 == (char)-1 || x2 == (char)-1)
+      unsigned char x1 = xdigitval[(unsigned char)hex[i * 2]];
+      unsigned char x2 = xdigitval[(unsigned char)hex[i * 2 + 1]];
+      if (x1 == 0xFF || x2 == 0xFF)
         return svn_error_create(SVN_ERR_BAD_CHECKSUM_PARSE, NULL, NULL);
 
-      digest[i] = (char)((x1 << 4) | x2);
-      is_nonzero |= (char)((x1 << 4) | x2);
+      digest[i] = (x1 << 4) | x2;
+      is_nonzero |= digest[i];
     }
 
   if (!is_nonzero)

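A side note on the parse-hex change above: switching the lookup table and digest to unsigned char makes the "invalid digit" marker an unambiguous 0xFF rather than a signed -1. Below is a compact standalone sketch of the same parsing loop, using a helper function in place of the 256-entry table; it is an illustration, not the library code.

#include <stdio.h>

/* Map an ASCII character to its hex value, or 0xFF if it is not a hex
 * digit.  Unsigned char avoids the sign-extension pitfalls of the old
 * signed table. */
static unsigned char
hex_val(unsigned char c)
{
  if (c >= '0' && c <= '9') return (unsigned char)(c - '0');
  if (c >= 'a' && c <= 'f') return (unsigned char)(c - 'a' + 10);
  if (c >= 'A' && c <= 'F') return (unsigned char)(c - 'A' + 10);
  return 0xFF;
}

/* Parse LEN digest bytes from the hex string HEX into DIGEST.
 * Return 0 on success, -1 on a malformed digit. */
static int
parse_hex(unsigned char *digest, const char *hex, size_t len)
{
  size_t i;

  for (i = 0; i < len; i++)
    {
      unsigned char x1 = hex_val((unsigned char)hex[i * 2]);
      unsigned char x2 = hex_val((unsigned char)hex[i * 2 + 1]);

      if (x1 == 0xFF || x2 == 0xFF)
        return -1;
      digest[i] = (unsigned char)((x1 << 4) | x2);
    }

  return 0;
}

int main(void)
{
  unsigned char digest[2];

  if (parse_hex(digest, "d41d", sizeof(digest)) == 0)
    printf("%02x %02x\n", digest[0], digest[1]);   /* d4 1d */
  return 0;
}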
Modified: subversion/branches/ra-git/subversion/libsvn_subr/cmdline.c
URL: http://svn.apache.org/viewvc/subversion/branches/ra-git/subversion/libsvn_subr/cmdline.c?rev=1717223&r1=1717222&r2=1717223&view=diff
==============================================================================
--- subversion/branches/ra-git/subversion/libsvn_subr/cmdline.c (original)
+++ subversion/branches/ra-git/subversion/libsvn_subr/cmdline.c Mon Nov 30 10:24:16 2015
@@ -538,19 +538,20 @@ trust_server_cert_non_interactive(svn_au
                                   apr_pool_t *pool)
 {
   struct trust_server_cert_non_interactive_baton *b = baton;
+  apr_uint32_t non_ignored_failures;
   *cred_p = NULL;
 
-  if (failures == 0 ||
-      (b->trust_server_cert_unknown_ca &&
-       (failures & SVN_AUTH_SSL_UNKNOWNCA)) ||
-      (b->trust_server_cert_cn_mismatch &&
-       (failures & SVN_AUTH_SSL_CNMISMATCH)) ||
-      (b->trust_server_cert_expired &&
-       (failures & SVN_AUTH_SSL_EXPIRED)) ||
-      (b->trust_server_cert_not_yet_valid &&
-        (failures & SVN_AUTH_SSL_NOTYETVALID)) ||
-      (b->trust_server_cert_other_failure &&
-        (failures & SVN_AUTH_SSL_OTHER)))
+  /* Mask away bits we are instructed to ignore. */
+  non_ignored_failures = failures & ~(
+        (b->trust_server_cert_unknown_ca ? SVN_AUTH_SSL_UNKNOWNCA : 0)
+      | (b->trust_server_cert_cn_mismatch ? SVN_AUTH_SSL_CNMISMATCH : 0)
+      | (b->trust_server_cert_expired ? SVN_AUTH_SSL_EXPIRED : 0)
+      | (b->trust_server_cert_not_yet_valid ? SVN_AUTH_SSL_NOTYETVALID : 0)
+      | (b->trust_server_cert_other_failure ? SVN_AUTH_SSL_OTHER : 0)
+  );
+
+  /* If no failures remain, accept the certificate. */
+  if (non_ignored_failures == 0)
     {
       *cred_p = apr_pcalloc(pool, sizeof(**cred_p));
       (*cred_p)->may_save = FALSE;
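The rewritten condition above first masks away the failure bits the user chose to trust and accepts the certificate only when nothing else remains, which closes the gap where one trusted failure could hide other, untrusted ones. A self-contained sketch of that check follows; the bit values are made up and merely stand in for the SVN_AUTH_SSL_* constants.

#include <stdio.h>

/* Made-up bit values standing in for the SVN_AUTH_SSL_* constants. */
#define FAIL_UNKNOWN_CA   0x01
#define FAIL_CN_MISMATCH  0x02
#define FAIL_EXPIRED      0x04

/* Return 1 if every reported failure bit is explicitly trusted,
 * 0 otherwise. */
static int
cert_acceptable(unsigned failures,
                int trust_unknown_ca,
                int trust_cn_mismatch,
                int trust_expired)
{
  unsigned ignored = (trust_unknown_ca  ? FAIL_UNKNOWN_CA  : 0)
                   | (trust_cn_mismatch ? FAIL_CN_MISMATCH : 0)
                   | (trust_expired     ? FAIL_EXPIRED     : 0);

  return (failures & ~ignored) == 0;
}

int main(void)
{
  /* Unknown CA *and* expired, but only unknown CA is trusted: reject. */
  printf("%d\n", cert_acceptable(FAIL_UNKNOWN_CA | FAIL_EXPIRED, 1, 0, 0));

  /* Only unknown CA, which is trusted: accept. */
  printf("%d\n", cert_acceptable(FAIL_UNKNOWN_CA, 1, 0, 0));
  return 0;
}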
@@ -810,9 +811,124 @@ svn_cmdline__print_xml_prop(svn_stringbu
   return;
 }
 
+/* Return the most similar string to NEEDLE in HAYSTACK, which contains
+ * HAYSTACK_LEN elements.  Return NULL if no string is sufficiently similar.
+ */
+/* See svn_cl__similarity_check() for a more general solution. */
+static const char *
+most_similar(const char *needle_cstr,
+             const char **haystack,
+             apr_size_t haystack_len,
+             apr_pool_t *scratch_pool)
+{
+  const char *max_similar = NULL;
+  apr_size_t max_score = 0;
+  apr_size_t i;
+  svn_membuf_t membuf;
+  svn_string_t *needle_str = svn_string_create(needle_cstr, scratch_pool);
+
+  svn_membuf__create(&membuf, 64, scratch_pool);
+
+  for (i = 0; i < haystack_len; i++)
+    {
+      apr_size_t score;
+      svn_string_t *hay = svn_string_create(haystack[i], scratch_pool);
+
+      score = svn_string__similarity(needle_str, hay, &membuf, NULL);
+
+      /* If you update this factor, consider updating
+       * svn_cl__similarity_check(). */
+      if (score >= (2 * SVN_STRING__SIM_RANGE_MAX + 1) / 3
+          && score > max_score)
+        {
+          max_score = score;
+          max_similar = haystack[i];
+        }
+    }
+
+  if (max_score)
+    return max_similar;
+  else
+    return NULL;
+}
+
+/* Verify that NEEDLE is in HAYSTACK, which contains HAYSTACK_LEN elements. */
+static svn_error_t *
+string_in_array(const char *needle,
+                const char **haystack,
+                apr_size_t haystack_len,
+                apr_pool_t *scratch_pool)
+{
+  const char *next_of_kin;
+  apr_size_t i;
+  for (i = 0; i < haystack_len; i++)
+    {
+      if (!strcmp(needle, haystack[i]))
+        return SVN_NO_ERROR;
+    }
+
+  /* Error. */
+  next_of_kin = most_similar(needle, haystack, haystack_len, scratch_pool);
+  if (next_of_kin)
+    return svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+                             _("Ignoring unknown value '%s'; "
+                               "did you mean '%s'?"),
+                             needle, next_of_kin);
+  else
+    return svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+                             _("Ignoring unknown value '%s'"),
+                             needle);
+}
+
+#include "config_keys.inc"
+
+/* Validate that the FILE, SECTION, and OPTION components of CONFIG_OPTION
+ * are known.  Return an error if not.  (An unknown value may be either a
+ * typo or an option added in a newer minor version of Subversion.) */
+static svn_error_t *
+validate_config_option(svn_cmdline__config_argument_t *config_option,
+                       apr_pool_t *scratch_pool)
+{
+  svn_boolean_t arbitrary_keys = FALSE;
+
+  /* TODO: some day, we could also verify that OPTION is valid for SECTION;
+     i.e., forbid invalid combinations such as config:auth:diff-extensions. */
+
+#define ARRAYLEN(x) ( sizeof((x)) / sizeof((x)[0]) )
+
+  SVN_ERR(string_in_array(config_option->file, svn__valid_config_files,
+                          ARRAYLEN(svn__valid_config_files),
+                          scratch_pool));
+  SVN_ERR(string_in_array(config_option->section, svn__valid_config_sections,
+                          ARRAYLEN(svn__valid_config_sections),
+                          scratch_pool));
+
+  /* Don't validate option names for sections such as servers[group],
+   * config[tunnels], and config[auto-props] that permit arbitrary options. */
+    {
+      int i;
+
+      for (i = 0; i < ARRAYLEN(svn__empty_config_sections); i++)
+        {
+          if (!strcmp(config_option->section, svn__empty_config_sections[i]))
+            arbitrary_keys = TRUE;
+        }
+    }
+
+  if (! arbitrary_keys)
+    SVN_ERR(string_in_array(config_option->option, svn__valid_config_options,
+                            ARRAYLEN(svn__valid_config_options),
+                            scratch_pool));
+
+#undef ARRAYLEN
+
+  return SVN_NO_ERROR;
+}
+
 svn_error_t *
 svn_cmdline__parse_config_option(apr_array_header_t *config_options,
                                  const char *opt_arg,
+                                 const char *prefix,
                                  apr_pool_t *pool)
 {
   svn_cmdline__config_argument_t *config_option;
@@ -826,6 +942,8 @@ svn_cmdline__parse_config_option(apr_arr
           if ((equals_sign = strchr(second_colon + 1, '=')) &&
               (equals_sign != second_colon + 1))
             {
+              svn_error_t *warning;
+
               config_option = apr_pcalloc(pool, sizeof(*config_option));
               config_option->file = apr_pstrndup(pool, opt_arg,
                                                  first_colon - opt_arg);
@@ -834,6 +952,13 @@ svn_cmdline__parse_config_option(apr_arr
               config_option->option = apr_pstrndup(pool, second_colon + 1,
                                                    equals_sign - second_colon - 1);
 
+              warning = validate_config_option(config_option, pool);
+              if (warning)
+                {
+                  svn_handle_warning2(stderr, warning, prefix);
+                  svn_error_clear(warning);
+                }
+
               if (! (strchr(config_option->option, ':')))
                 {
                   config_option->value = apr_pstrndup(pool, equals_sign + 1,
@@ -1046,6 +1171,36 @@ svn_cmdline__print_xml_prop_hash(svn_str
 }
 
 svn_boolean_t
+svn_cmdline__stdin_is_a_terminal(void)
+{
+#ifdef WIN32
+  return (_isatty(STDIN_FILENO) != 0);
+#else
+  return (isatty(STDIN_FILENO) != 0);
+#endif
+}
+
+svn_boolean_t
+svn_cmdline__stdout_is_a_terminal(void)
+{
+#ifdef WIN32
+  return (_isatty(STDOUT_FILENO) != 0);
+#else
+  return (isatty(STDOUT_FILENO) != 0);
+#endif
+}
+
+svn_boolean_t
+svn_cmdline__stderr_is_a_terminal(void)
+{
+#ifdef WIN32
+  return (_isatty(STDERR_FILENO) != 0);
+#else
+  return (isatty(STDERR_FILENO) != 0);
+#endif
+}
+
+svn_boolean_t
 svn_cmdline__be_interactive(svn_boolean_t non_interactive,
                             svn_boolean_t force_interactive)
 {
@@ -1404,3 +1559,50 @@ svn_cmdline__edit_string_externally(svn_
 
   return svn_error_trace(err);
 }
+
+svn_error_t *
+svn_cmdline__parse_trust_options(
+                        svn_boolean_t *trust_server_cert_unknown_ca,
+                        svn_boolean_t *trust_server_cert_cn_mismatch,
+                        svn_boolean_t *trust_server_cert_expired,
+                        svn_boolean_t *trust_server_cert_not_yet_valid,
+                        svn_boolean_t *trust_server_cert_other_failure,
+                        const char *opt_arg,
+                        apr_pool_t *scratch_pool)
+{
+  apr_array_header_t *failures;
+  int i;
+
+  *trust_server_cert_unknown_ca = FALSE;
+  *trust_server_cert_cn_mismatch = FALSE;
+  *trust_server_cert_expired = FALSE;
+  *trust_server_cert_not_yet_valid = FALSE;
+  *trust_server_cert_other_failure = FALSE;
+
+  failures = svn_cstring_split(opt_arg, ", \n\r\t\v", TRUE, scratch_pool);
+
+  for (i = 0; i < failures->nelts; i++)
+    {
+      const char *value = APR_ARRAY_IDX(failures, i, const char *);
+      if (!strcmp(value, "unknown-ca"))
+        *trust_server_cert_unknown_ca = TRUE;
+      else if (!strcmp(value, "cn-mismatch"))
+        *trust_server_cert_cn_mismatch = TRUE;
+      else if (!strcmp(value, "expired"))
+        *trust_server_cert_expired = TRUE;
+      else if (!strcmp(value, "not-yet-valid"))
+        *trust_server_cert_not_yet_valid = TRUE;
+      else if (!strcmp(value, "other"))
+        *trust_server_cert_other_failure = TRUE;
+      else
+        return svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+                                  _("Unknown value '%s' for %s.\n"
+                                    "Supported values: %s"),
+                                  value,
+                                  "--trust-server-cert-failures",
+                                  "unknown-ca, cn-mismatch, expired, "
+                                  "not-yet-valid, other");
+    }
+
+  return SVN_NO_ERROR;
+}

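For reference, a rough standalone equivalent of the new --trust-server-cert-failures parsing above, using strtok() where the real code uses svn_cstring_split(). The accepted flag names match the option's documented values; everything else here is illustrative.

#include <stdio.h>
#include <string.h>

int main(void)
{
  char arg[] = "unknown-ca, expired";          /* sample option argument */
  const char *seps = ", \n\r\t\v";
  int unknown_ca = 0, cn_mismatch = 0, expired = 0;
  char *tok;

  for (tok = strtok(arg, seps); tok != NULL; tok = strtok(NULL, seps))
    {
      if (!strcmp(tok, "unknown-ca"))
        unknown_ca = 1;
      else if (!strcmp(tok, "cn-mismatch"))
        cn_mismatch = 1;
      else if (!strcmp(tok, "expired"))
        expired = 1;
      else
        fprintf(stderr, "unknown value '%s'\n", tok);
    }

  printf("unknown-ca=%d cn-mismatch=%d expired=%d\n",
         unknown_ca, cn_mismatch, expired);
  return 0;
}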
Modified: subversion/branches/ra-git/subversion/libsvn_subr/compress.c
URL: http://svn.apache.org/viewvc/subversion/branches/ra-git/subversion/libsvn_subr/compress.c?rev=1717223&r1=1717222&r2=1717223&view=diff
==============================================================================
--- subversion/branches/ra-git/subversion/libsvn_subr/compress.c (original)
+++ subversion/branches/ra-git/subversion/libsvn_subr/compress.c Mon Nov 30 10:24:16 2015
@@ -31,6 +31,21 @@
 
 #include "svn_private_config.h"
 
+const char *
+svn_zlib__compiled_version(void)
+{
+  static const char zlib_version_str[] = ZLIB_VERSION;
+
+  return zlib_version_str;
+}
+
+const char *
+svn_zlib__runtime_version(void)
+{
+  return zlibVersion();
+}
+
+
 /* The zlib compressBound function was not exported until 1.2.0. */
 #if ZLIB_VERNUM >= 0x1200
 #define svnCompressBound(LEN) compressBound(LEN)
@@ -67,6 +82,17 @@ svn__encode_uint(unsigned char *p, apr_u
   return p;
 }
 
+unsigned char *
+svn__encode_int(unsigned char *p, apr_int64_t val)
+{
+  apr_uint64_t value = val;
+  value = value & APR_UINT64_C(0x8000000000000000)
+        ? APR_UINT64_MAX - (2 * value)
+        : 2 * value;
+
+  return svn__encode_uint(p, value);
+}
+
 const unsigned char *
 svn__decode_uint(apr_uint64_t *val,
                  const unsigned char *p,
@@ -74,7 +100,7 @@ svn__decode_uint(apr_uint64_t *val,
 {
   apr_uint64_t temp = 0;
 
-  if (p + SVN__MAX_ENCODED_UINT_LEN < end)
+  if (end - p > SVN__MAX_ENCODED_UINT_LEN)
     end = p + SVN__MAX_ENCODED_UINT_LEN;
 
   /* Decode bytes until we're done. */
@@ -96,6 +122,22 @@ svn__decode_uint(apr_uint64_t *val,
   return NULL;
 }
 
+const unsigned char *
+svn__decode_int(apr_int64_t *val,
+                const unsigned char *p,
+                const unsigned char *end)
+{
+  apr_uint64_t value;
+  const unsigned char *result = svn__decode_uint(&value, p, end);
+
+  value = value & 1
+        ? (APR_UINT64_MAX - value / 2)
+        : value / 2;
+  *val = (apr_int64_t)value;
+
+  return result;
+}
+
 /* If IN is a string that is >= MIN_COMPRESS_SIZE and the COMPRESSION_LEVEL
    is not SVN_DELTA_COMPRESSION_LEVEL_NONE, zlib-compress it and place the
    result in OUT, with an integer prepended specifying the original size.
@@ -220,7 +262,7 @@ zlib_decode(const unsigned char *in, apr
 }
 
 svn_error_t *
-svn__compress(svn_stringbuf_t *in,
+svn__compress(const void *data, apr_size_t len,
               svn_stringbuf_t *out,
               int compression_method)
 {
@@ -230,13 +272,13 @@ svn__compress(svn_stringbuf_t *in,
                              _("Unsupported compression method %d"),
                              compression_method);
 
-  return zlib_encode(in->data, in->len, out, compression_method);
+  return zlib_encode(data, len, out, compression_method);
 }
 
 svn_error_t *
-svn__decompress(svn_stringbuf_t *in,
+svn__decompress(const void *data, apr_size_t len,
                 svn_stringbuf_t *out,
                 apr_size_t limit)
 {
-  return zlib_decode((const unsigned char*)in->data, in->len, out, limit);
+  return zlib_decode(data, len, out, limit);
 }

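The new svn__encode_int()/svn__decode_int() pair folds signed values into unsigned ones ("zig-zag" style) so that small magnitudes of either sign take few bytes in the variable-length encoding. Here is a standalone sketch of just that mapping, with the 7-bit byte encoding itself left out; the function names are illustrative.

#include <stdio.h>
#include <stdint.h>

/* Map a signed value onto an unsigned one: 0,-1,1,-2,2,... -> 0,1,2,3,4,... */
static uint64_t
zigzag_encode(int64_t val)
{
  uint64_t value = (uint64_t)val;

  return value & UINT64_C(0x8000000000000000)
       ? UINT64_MAX - 2 * value
       : 2 * value;
}

/* Inverse of zigzag_encode(). */
static int64_t
zigzag_decode(uint64_t value)
{
  return (int64_t)(value & 1 ? UINT64_MAX - value / 2 : value / 2);
}

int main(void)
{
  int64_t samples[] = { 0, 1, -1, 2, -2, 1000, -1000 };
  size_t i;

  for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
    printf("%lld -> %llu -> %lld\n",
           (long long)samples[i],
           (unsigned long long)zigzag_encode(samples[i]),
           (long long)zigzag_decode(zigzag_encode(samples[i])));
  return 0;
}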
Modified: subversion/branches/ra-git/subversion/libsvn_subr/config.c
URL: http://svn.apache.org/viewvc/subversion/branches/ra-git/subversion/libsvn_subr/config.c?rev=1717223&r1=1717222&r2=1717223&view=diff
==============================================================================
--- subversion/branches/ra-git/subversion/libsvn_subr/config.c (original)
+++ subversion/branches/ra-git/subversion/libsvn_subr/config.c Mon Nov 30 10:24:16 2015
@@ -55,6 +55,27 @@ struct cfg_section_t
 };
 
 
+/* States that a config option value can assume. */
+typedef enum option_state_t
+{
+  /* Value still needs to be expanded.
+     This is the initial state for *all* values. */
+  option_state_needs_expanding,
+
+  /* Value is currently being expanded.
+     This transitional state allows for detecting cyclic dependencies. */
+  option_state_expanding,
+
+  /* Expanded value is available.
+     Values that never needed expanding go directly into this state,
+     skipping option_state_expanding. */
+  option_state_expanded,
+
+  /* The value expansion is cyclic, which makes the result formally
+     undefined.  We return a defined value ("") in that case. */
+  option_state_cyclic
+} option_state_t;
+
 /* Option table entries. */
 typedef struct cfg_option_t cfg_option_t;
 struct cfg_option_t
@@ -71,10 +92,10 @@ struct cfg_option_t
   /* The expanded option value. */
   const char *x_value;
 
-  /* Expansion flag. If this is TRUE, this value has already been expanded.
-     In this case, if x_value is NULL, no expansions were necessary,
-     and value should be used directly. */
-  svn_boolean_t expanded;
+  /* Expansion state. If this is option_state_expanded, VALUE has already
+     been expanded.  In this case, if x_value is NULL, no expansions were
+     necessary, and value should be used directly. */
+  option_state_t state;
 };
 
 
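The option_state_t introduced above replaces the old boolean `expanded' flag with a small state machine so that cyclic %(...)s references can be detected instead of recursing forever. Below is a toy sketch of the same pattern on a fixed three-option "config"; all names and the data layout are illustrative, not the svn_config_t internals.

#include <stdio.h>

/* The four expansion states, mirroring option_state_t above. */
typedef enum { NEEDS_EXPANDING, EXPANDING, EXPANDED, CYCLIC } state_t;

/* A toy "config": option i's value is a reference to option refs[i],
 * or -1 for a plain literal.  Here a -> b, b -> a, c is a literal. */
static int refs[]   = { 1, 0, -1 };
static state_t st[] = { NEEDS_EXPANDING, NEEDS_EXPANDING, NEEDS_EXPANDING };

/* Expand option I; return 1 if the result is well defined, 0 on a cycle. */
static int
expand(int i)
{
  if (st[i] == EXPANDING || st[i] == CYCLIC)
    {
      st[i] = CYCLIC;       /* re-entered while still expanding: a cycle */
      return 0;
    }

  if (st[i] == NEEDS_EXPANDING)
    {
      st[i] = EXPANDING;
      if (refs[i] < 0 || expand(refs[i]))
        st[i] = EXPANDED;
      else
        st[i] = CYCLIC;
    }

  return st[i] == EXPANDED;
}

int main(void)
{
  printf("a: %s\n", expand(0) ? "defined" : "cyclic");   /* cyclic */
  printf("c: %s\n", expand(2) ? "defined" : "cyclic");   /* defined */
  return 0;
}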
@@ -338,22 +359,14 @@ for_each_option(svn_config_t *cfg, void
        sec_ndx != NULL;
        sec_ndx = apr_hash_next(sec_ndx))
     {
-      void *sec_ptr;
-      cfg_section_t *sec;
+      cfg_section_t *sec = apr_hash_this_val(sec_ndx);
       apr_hash_index_t *opt_ndx;
 
-      apr_hash_this(sec_ndx, NULL, NULL, &sec_ptr);
-      sec = sec_ptr;
-
       for (opt_ndx = apr_hash_first(pool, sec->options);
            opt_ndx != NULL;
            opt_ndx = apr_hash_next(opt_ndx))
         {
-          void *opt_ptr;
-          cfg_option_t *opt;
-
-          apr_hash_this(opt_ndx, NULL, NULL, &opt_ptr);
-          opt = opt_ptr;
+          cfg_option_t *opt = apr_hash_this_val(opt_ndx);
 
           if (callback(baton, sec, opt))
             return;
@@ -396,12 +409,13 @@ svn_config_merge(svn_config_t *cfg, cons
 static svn_boolean_t
 rmex_callback(void *baton, cfg_section_t *section, cfg_option_t *option)
 {
-  /* Only clear the `expanded' flag if the value actually contains
+  /* Only reset the expansion state if the value actually contains
      variable expansions. */
-  if (option->expanded && option->x_value != NULL)
+  if (   (option->state == option_state_expanded && option->x_value != NULL)
+      || option->state == option_state_cyclic)
     {
       option->x_value = NULL;
-      option->expanded = FALSE;
+      option->state = option_state_needs_expanding;
     }
 
   return FALSE;
@@ -482,7 +496,7 @@ find_option(svn_config_t *cfg, const cha
 
 
 /* Has a bi-directional dependency with make_string_from_option(). */
-static void
+static svn_boolean_t
 expand_option_value(svn_config_t *cfg, cfg_section_t *section,
                     const char *opt_value, const char **opt_x_valuep,
                     apr_pool_t *x_pool);
@@ -496,7 +510,20 @@ make_string_from_option(const char **val
                         apr_pool_t* x_pool)
 {
   /* Expand the option value if necessary. */
-  if (!opt->expanded)
+  if (   opt->state == option_state_expanding
+      || opt->state == option_state_cyclic)
+    {
+      /* Recursion is not supported.  Since we cannot return an error
+       * and should not abort the process, the next best thing is to
+       * report the recursive part as an empty string. */
+      *valuep = "";
+
+      /* Go into "value undefined" state. */
+      opt->state = option_state_cyclic;
+
+      return;
+    }
+  else if (opt->state == option_state_needs_expanding)
     {
       /* before attempting to expand an option, check for the placeholder.
        * If none is there, there is no point in calling expand_option_value.
@@ -511,9 +538,16 @@ make_string_from_option(const char **val
 
           tmp_pool = (x_pool ? x_pool : svn_pool_create(cfg->x_pool));
 
-          expand_option_value(cfg, section, opt->value, &opt->x_value, tmp_pool);
-          opt->expanded = TRUE;
+          /* Expand the value. During that process, have the option marked
+           * as "expanding" to detect cycles. */
+          opt->state = option_state_expanding;
+          if (expand_option_value(cfg, section, opt->value, &opt->x_value,
+                                  tmp_pool))
+            opt->state = option_state_expanded;
+          else
+            opt->state = option_state_cyclic;
 
+          /* Ensure the expanded value is allocated in a permanent pool. */
           if (x_pool != cfg->x_pool)
             {
               /* Grab the fully expanded value from tmp_pool before its
@@ -527,7 +561,7 @@ make_string_from_option(const char **val
         }
       else
         {
-          opt->expanded = TRUE;
+          opt->state = option_state_expanded;
         }
     }
 
@@ -549,8 +583,9 @@ make_string_from_option(const char **val
 
 /* Expand OPT_VALUE (which may be NULL) in SECTION into *OPT_X_VALUEP.
    If no variable replacements are done, set *OPT_X_VALUEP to
-   NULL. Allocate from X_POOL. */
-static void
+   NULL.  Return TRUE if the expanded value is defined and FALSE
+   for recursive definitions.  Allocate from X_POOL. */
+static svn_boolean_t
 expand_option_value(svn_config_t *cfg, cfg_section_t *section,
                     const char *opt_value, const char **opt_x_valuep,
                     apr_pool_t *x_pool)
@@ -587,6 +622,18 @@ expand_option_value(svn_config_t *cfg, c
                  should terminate. */
               make_string_from_option(&cstring, cfg, section, x_opt, x_pool);
 
+              /* Values that depend on cyclic values must also be marked
+               * "undefined", because they might themselves form cycles
+               * with the one we just detected.  Since the recursion aborts
+               * early, we would no longer follow, and thus no longer
+               * detect, such dependent cycles.
+               */
+              if (x_opt->state == option_state_cyclic)
+                {
+                  *opt_x_valuep = "";
+                  return FALSE;
+                }
+
               /* Append the plain text preceding the expansion. */
               len = name_start - FMT_START_LEN - copy_from;
               if (buf == NULL)
@@ -625,6 +672,9 @@ expand_option_value(svn_config_t *cfg, c
     }
   else
     *opt_x_valuep = NULL;
+
+  /* Expansion has a well-defined answer. */
+  return TRUE;
 }
 
 static cfg_section_t *
@@ -664,7 +714,7 @@ svn_config_create_option(cfg_option_t **
 
   o->value = apr_pstrdup(pool, value);
   o->x_value = NULL;
-  o->expanded = FALSE;
+  o->state = option_state_needs_expanding;
 
   *opt = o;
 }
@@ -685,7 +735,8 @@ svn_config__is_expanded(svn_config_t *cf
     return FALSE;
 
   /* already expanded? */
-  if (opt->expanded)
+  if (   opt->state == option_state_expanded
+      || opt->state == option_state_cyclic)
     return TRUE;
 
   /* needs expansion? */
@@ -719,8 +770,14 @@ svn_config_get(svn_config_t *cfg, const
           {
             apr_pool_t *tmp_pool = svn_pool_create(cfg->pool);
             const char *x_default;
-            expand_option_value(cfg, sec, default_value, &x_default, tmp_pool);
-            if (x_default)
+            if (!expand_option_value(cfg, sec, default_value, &x_default,
+                                     tmp_pool))
+              {
+                /* Recursive definitions are not supported.
+                   Normalize the answer in that case. */
+                *valuep = "";
+              }
+            else if (x_default)
               {
                 svn_stringbuf_set(cfg->tmp_value, x_default);
                 *valuep = cfg->tmp_value->data;
@@ -758,7 +815,7 @@ svn_config_set(svn_config_t *cfg,
     {
       /* Replace the option's value. */
       opt->value = apr_pstrdup(cfg->pool, value);
-      opt->expanded = FALSE;
+      opt->state = option_state_needs_expanding;
       return;
     }
 
@@ -965,11 +1022,8 @@ svn_config_enumerate_sections2(svn_confi
        sec_ndx != NULL;
        sec_ndx = apr_hash_next(sec_ndx))
     {
-      void *sec_ptr;
-      cfg_section_t *sec;
+      cfg_section_t *sec = apr_hash_this_val(sec_ndx);
 
-      apr_hash_this(sec_ndx, NULL, NULL, &sec_ptr);
-      sec = sec_ptr;
       ++count;
       svn_pool_clear(iteration_pool);
       if (!callback(sec->name, baton, iteration_pool))
@@ -1001,13 +1055,9 @@ svn_config_enumerate(svn_config_t *cfg,
        opt_ndx != NULL;
        opt_ndx = apr_hash_next(opt_ndx))
     {
-      void *opt_ptr;
-      cfg_option_t *opt;
+      cfg_option_t *opt = apr_hash_this_val(opt_ndx);
       const char *temp_value;
 
-      apr_hash_this(opt_ndx, NULL, NULL, &opt_ptr);
-      opt = opt_ptr;
-
       ++count;
       make_string_from_option(&temp_value, cfg, sec, opt, NULL);
       if (!callback(opt->name, temp_value, baton))
@@ -1039,13 +1089,9 @@ svn_config_enumerate2(svn_config_t *cfg,
        opt_ndx != NULL;
        opt_ndx = apr_hash_next(opt_ndx))
     {
-      void *opt_ptr;
-      cfg_option_t *opt;
+      cfg_option_t *opt = apr_hash_this_val(opt_ndx);
       const char *temp_value;
 
-      apr_hash_this(opt_ndx, NULL, NULL, &opt_ptr);
-      opt = opt_ptr;
-
       ++count;
       make_string_from_option(&temp_value, cfg, sec, opt, NULL);
       svn_pool_clear(iteration_pool);
@@ -1171,7 +1217,7 @@ svn_config_dup(svn_config_t **cfgp,
 
       destopt->value = apr_pstrdup(pool, srcopt->value);
       destopt->x_value = apr_pstrdup(pool, srcopt->x_value);
-      destopt->expanded = srcopt->expanded;
+      destopt->state = srcopt->state;
       apr_hash_set(destsec->options,
                    apr_pstrdup(pool, (const char*)optkey),
                    optkeyLength, destopt);