Posted to commits@subversion.apache.org by br...@apache.org on 2013/05/07 15:26:27 UTC
svn commit: r1479901 [3/7] - in /subversion/branches/wc-collate-path: ./
build/ac-macros/ build/generator/ build/generator/templates/
contrib/server-side/fsfsfixer/ contrib/server-side/fsfsfixer/fixer/
subversion/bindings/ctypes-python/ subversion/bind...
Modified: subversion/branches/wc-collate-path/subversion/libsvn_subr/cache-membuffer.c
URL: http://svn.apache.org/viewvc/subversion/branches/wc-collate-path/subversion/libsvn_subr/cache-membuffer.c?rev=1479901&r1=1479900&r2=1479901&view=diff
==============================================================================
--- subversion/branches/wc-collate-path/subversion/libsvn_subr/cache-membuffer.c (original)
+++ subversion/branches/wc-collate-path/subversion/libsvn_subr/cache-membuffer.c Tue May 7 13:26:25 2013
@@ -45,6 +45,8 @@
*
* 1. A linear data buffer containing cached items in a serialized
* representation. There may be arbitrary gaps between entries.
+ * This buffer is sub-divided into (currently two) cache levels.
+ *
* 2. A directory of cache entries. This is organized similar to CPU
* data caches: for every possible key, there is exactly one group
* of entries that may contain the header info for an item with
@@ -56,23 +58,30 @@
* between different processes and / or to persist them on disk. These
* out-of-process features have not been implemented, yet.
*
+ * Superficially, the cache levels are used as usual: insertion happens
+ * into L1 and items evicted from L1 may be promoted to L2. The real
+ * point of the levels is a different one, though: L1 is a circular
+ * buffer, i.e. we get perfect caching for the last N bytes written,
+ * where N is the size of L1. L2 uses a more elaborate scheme based on
+ * priorities and hit counts as described below.
+ *
* The data buffer usage information is implicitly given by the directory
* entries. Every USED entry has a reference to the previous and the next
* used dictionary entry and this double-linked list is ordered by the
* offsets of their item data within the data buffer. So removing data,
* for instance, is done simply by unlinking it from the chain, implicitly
* marking the entry as well as the data buffer section previously
- * associated to it as unused.
+ * associated to it as unused. The first and last elements of that
+ * chain are referenced from the respective cache level.
*
- * Insertion can occur at only one, sliding position. It is marked by its
- * offset in the data buffer plus the index of the first used entry at or
- * behind that position. If this gap is too small to accommodate the new
- * item, the insertion window is extended as described below. The new entry
- * will always be inserted at the bottom end of the window and since the
- * next used entry is known, properly sorted insertion is possible.
+ * Insertion can occur at only one, sliding position per cache level. It is
+ * marked by its offset in the data buffer and the index of the first used
+ * entry at or behind that position. If this gap is too small to accommodate
+ * the new item, the insertion window is extended as described below. The new
+ * entry will always be inserted at the bottom end of the window and since
+ * the next used entry is known, properly sorted insertion is possible.
*
* To make the cache perform robustly in a wide range of usage scenarios,
- * a randomized variant of LFU is used (see ensure_data_insertable for
+ * L2 uses a randomized variant of LFU (see ensure_data_insertable_l2 for
* details). Every item holds a read hit counter and there is a global read
* hit counter. The more hits an entry has in relation to the average, the
* more it is likely to be kept using a rand()-based condition. The test is
@@ -86,10 +95,10 @@
* they get not used for a while. Also, even a cache thrashing situation
* about 50% of the content survives every 50% of the cache being re-written
* with new entries. For details on the fine-tuning involved, see the
- * comments in ensure_data_insertable().
+ * comments in ensure_data_insertable_l2().
*
* To limit the entry size and management overhead, not the actual item keys
- * but only their MD5 checksums will not be stored. This is reasonably safe
+ * but only their MD5-based hashes will be stored. This is reasonably safe
* to do since users have only limited control over the full keys, even if
* these contain folder paths. So, it is very hard to deliberately construct
* colliding keys. Random checksum collisions can be shown to be extremely
@@ -313,7 +322,7 @@ static svn_error_t* assert_equal_tags(co
/* A single dictionary entry. Since all entries will be allocated once
* during cache creation, those entries might be either used or unused.
* An entry is used if and only if it is contained in the doubly-linked
- * list of used entries.
+ * list of used entries per cache level.
*/
typedef struct entry_t
{
@@ -321,7 +330,8 @@ typedef struct entry_t
*/
entry_key_t key;
- /* The offset of the cached item's serialized data within the data buffer.
+ /* The offset of the cached item's serialized data within the cache's
+ * DATA buffer.
*/
apr_uint64_t offset;
@@ -337,15 +347,15 @@ typedef struct entry_t
/* Reference to the next used entry in the order defined by offset.
* NO_INDEX indicates the end of the list; this entry must be referenced
- * by the caches membuffer_cache_t.last member. NO_INDEX also implies
- * that the data buffer is not used beyond offset+size.
+ * by the cache's cache_level_t.last member. NO_INDEX also implies that
+ * the data buffer is not used beyond offset+size.
* Only valid for used entries.
*/
apr_uint32_t next;
/* Reference to the previous used entry in the order defined by offset.
* NO_INDEX indicates the end of the list; this entry must be referenced
- * by the caches membuffer_cache_t.first member.
+ * by the cache's cache_level_t.first member.
* Only valid for used entries.
*/
apr_uint32_t previous;
@@ -368,28 +378,12 @@ typedef struct entry_group_t
entry_t entries[GROUP_SIZE];
} entry_group_t;
-/* The cache header structure.
+/* Per-cache level header structure. Instances of this are members of
+ * svn_membuffer_t and will use non-overlapping sections of its DATA buffer.
+ * All offset values are global / absolute to that whole buffer.
*/
-struct svn_membuffer_t
+typedef struct cache_level_t
{
- /* Number of cache segments. Must be a power of 2.
- Please note that this structure represents only one such segment
- and that all segments must / will report the same values here. */
- apr_uint32_t segment_count;
-
- /* The dictionary, GROUP_SIZE * group_count entries long. Never NULL.
- */
- entry_group_t *directory;
-
- /* Flag array with group_count / GROUP_INIT_GRANULARITY _bit_ elements.
- * Allows for efficiently marking groups as "not initialized".
- */
- unsigned char *group_initialized;
-
- /* Size of dictionary in groups. Must be > 0.
- */
- apr_uint32_t group_count;
-
/* Reference to the first (defined by the order content in the data
* buffer) dictionary entry used by any data item.
* NO_INDEX for an empty cache.
@@ -410,18 +404,46 @@ struct svn_membuffer_t
apr_uint32_t next;
- /* Pointer to the data buffer, data_size bytes long. Never NULL.
+ /* First offset in the cache's DATA buffer that belongs to this level.
*/
- unsigned char *data;
+ apr_uint64_t start_offset;
- /* Size of data buffer in bytes. Must be > 0.
+ /* Size of data buffer allocated to this level in bytes. Must be > 0.
*/
- apr_uint64_t data_size;
+ apr_uint64_t size;
/* Offset in the data buffer where the next insertion shall occur.
*/
apr_uint64_t current_data;
+} cache_level_t;
+
+/* The cache header structure.
+ */
+struct svn_membuffer_t
+{
+ /* Number of cache segments. Must be a power of 2.
+ Please note that this structure represents only one such segment
+ and that all segments must / will report the same values here. */
+ apr_uint32_t segment_count;
+
+ /* The dictionary, GROUP_SIZE * group_count entries long. Never NULL.
+ */
+ entry_group_t *directory;
+
+ /* Flag array with group_count / GROUP_INIT_GRANULARITY _bit_ elements.
+ * Allows for efficiently marking groups as "not initialized".
+ */
+ unsigned char *group_initialized;
+
+ /* Size of dictionary in groups. Must be > 0.
+ */
+ apr_uint32_t group_count;
+
+ /* Pointer to the data buffer, data_size bytes long. Never NULL.
+ */
+ unsigned char *data;
+
/* Total number of data buffer bytes in use. This is for statistics only.
*/
apr_uint64_t data_used;
@@ -431,6 +453,24 @@ struct svn_membuffer_t
*/
apr_uint64_t max_entry_size;
+ /* The cache levels, organized as sub-buffers. Since entries in the
+ * DIRECTORY use offsets in DATA for addressing, a cache lookup does
+ * not need to know the cache level of a specific item. Cache levels
+ * are only used to implement a hybrid insertion / eviction strategy.
+ */
+
+ /* First cache level, i.e. most insertions happen here. Very large
+ * items might get inserted directly into L2. L1 is a strict FIFO
+ * ring buffer that does not care about item priorities. All evicted
+ * items get a chance to be promoted to L2.
+ */
+ cache_level_t l1;
+
+ /* Second cache level, i.e. data evicted from L1 will be added here
+ * if the item is "important" enough or the L2 insertion window is large
+ * enough.
+ */
+ cache_level_t l2;
/* Number of used dictionary entries, i.e. number of cached items.
* In conjunction with hit_count, this is used calculate the average
@@ -621,6 +661,96 @@ get_index(svn_membuffer_t *cache, entry_
+ (apr_uint32_t)(entry - cache->directory[group_index].entries);
}
+/* Return the cache level of ENTRY in CACHE.
+ */
+static cache_level_t *
+get_cache_level(svn_membuffer_t *cache, entry_t *entry)
+{
+ return entry->offset < cache->l1.size ? &cache->l1
+ : &cache->l2;
+}
+
+/* Insert ENTRY into the chain of items that belong to LEVEL in CACHE. IDX
+ * is ENTRY's item index and is only given for efficiency. The insertion
+ * takes place just before LEVEL->NEXT. *CACHE will not be modified.
+ */
+static void
+chain_entry(svn_membuffer_t *cache,
+ cache_level_t *level,
+ entry_t *entry,
+ apr_uint32_t idx)
+{
+ /* insert ENTRY before this item */
+ entry_t *next = level->next == NO_INDEX
+ ? NULL
+ : get_entry(cache, level->next);
+ assert(idx == get_index(cache, entry));
+
+ /* update entry chain
+ */
+ entry->next = level->next;
+ if (level->first == NO_INDEX)
+ {
+ /* insert as the first entry and only in the chain
+ */
+ entry->previous = NO_INDEX;
+ level->last = idx;
+ level->first = idx;
+ }
+ else if (next == NULL)
+ {
+ /* insert as the last entry in the chain.
+ * Note that it cannot also be at the beginning of the chain.
+ */
+ entry->previous = level->last;
+ get_entry(cache, level->last)->next = idx;
+ level->last = idx;
+ }
+ else
+ {
+ /* insert either at the start of a non-empty list or
+ * somewhere in the middle
+ */
+ entry->previous = next->previous;
+ next->previous = idx;
+
+ if (entry->previous != NO_INDEX)
+ get_entry(cache, entry->previous)->next = idx;
+ else
+ level->first = idx;
+ }
+}
+
+/* Remove ENTRY from the chain of items that belong to LEVEL in CACHE. IDX
+ * is ENTRY's item index and is only given for efficiency. Please note
+ * that neither *CACHE nor *ENTRY will be modified.
+ */
+static void
+unchain_entry(svn_membuffer_t *cache,
+ cache_level_t *level,
+ entry_t *entry,
+ apr_uint32_t idx)
+{
+ assert(idx == get_index(cache, entry));
+
+ /* update
+ */
+ if (level->next == idx)
+ level->next = entry->next;
+
+ /* unlink it from the chain of used entries
+ */
+ if (entry->previous == NO_INDEX)
+ level->first = entry->next;
+ else
+ get_entry(cache, entry->previous)->next = entry->next;
+
+ if (entry->next == NO_INDEX)
+ level->last = entry->previous;
+ else
+ get_entry(cache, entry->next)->previous = entry->previous;
+}
+
/* Remove the used ENTRY from the CACHE, i.e. make it "unused".
* In contrast to insertion, removal is possible for any entry.
*/
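
A minimal standalone sketch (not part of this change; the sizes are made up) of the level layout that get_cache_level() above relies on: L1 occupies the first part of the shared DATA buffer, L2 the rest, so an entry's level follows from its offset alone. The real 1/4 : 3/4 split is set up in svn_cache__membuffer_cache_create() further down in this diff.

    #include <assert.h>

    int main(void)
    {
      unsigned long long data_size = 1024 * 1024;   /* hypothetical segment size */
      unsigned long long l1_size = data_size / 4;   /* L1 gets the first quarter */
      unsigned long long l2_size = data_size - l1_size;
      unsigned long long offset = 300 * 1024;       /* some entry's data offset */

      /* same test as get_cache_level(): offsets below l1_size belong to L1 */
      int in_l1 = offset < l1_size;

      assert(l1_size + l2_size == data_size);
      assert(!in_l1);   /* 300kB lies beyond the 256kB L1 region -> L2 */
      return 0;
    }
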
@@ -633,6 +763,7 @@ drop_entry(svn_membuffer_t *cache, entry
apr_uint32_t group_index = idx / GROUP_SIZE;
entry_group_t *group = &cache->directory[group_index];
apr_uint32_t last_in_group = group_index * GROUP_SIZE + group->used - 1;
+ cache_level_t *level = get_cache_level(cache, entry);
/* Only valid to be called for used entries.
*/
@@ -646,39 +777,31 @@ drop_entry(svn_membuffer_t *cache, entry
/* extend the insertion window, if the entry happens to border it
*/
- if (idx == cache->next)
- cache->next = entry->next;
+ if (idx == level->next)
+ level->next = entry->next;
else
- if (entry->next == cache->next)
+ if (entry->next == level->next)
{
/* insertion window starts right behind the entry to remove
*/
if (entry->previous == NO_INDEX)
{
/* remove the first entry -> insertion may start at pos 0, now */
- cache->current_data = 0;
+ level->current_data = level->start_offset;
}
else
{
/* insertion may start right behind the previous entry */
entry_t *previous = get_entry(cache, entry->previous);
- cache->current_data = ALIGN_VALUE( previous->offset
+ level->current_data = ALIGN_VALUE( previous->offset
+ previous->size);
}
}
/* unlink it from the chain of used entries
*/
- if (entry->previous == NO_INDEX)
- cache->first = entry->next;
- else
- get_entry(cache, entry->previous)->next = entry->next;
-
- if (entry->next == NO_INDEX)
- cache->last = entry->previous;
- else
- get_entry(cache, entry->next)->previous = entry->previous;
-
+ unchain_entry(cache, level, entry, idx);
+
/* Move last entry into hole (if the removed one is not the last used).
* We need to do this since all used entries are at the beginning of
* the group's entries array.
@@ -689,18 +812,22 @@ drop_entry(svn_membuffer_t *cache, entry
*/
*entry = group->entries[group->used-1];
+ /* this ENTRY may belong to a different cache level than the entry
+ * we have just removed */
+ level = get_cache_level(cache, entry);
+
/* update foreign links to new index
*/
- if (last_in_group == cache->next)
- cache->next = idx;
+ if (last_in_group == level->next)
+ level->next = idx;
if (entry->previous == NO_INDEX)
- cache->first = idx;
+ level->first = idx;
else
get_entry(cache, entry->previous)->next = idx;
if (entry->next == NO_INDEX)
- cache->last = idx;
+ level->last = idx;
else
get_entry(cache, entry->next)->previous = idx;
}
@@ -722,16 +849,14 @@ insert_entry(svn_membuffer_t *cache, ent
apr_uint32_t idx = get_index(cache, entry);
apr_uint32_t group_index = idx / GROUP_SIZE;
entry_group_t *group = &cache->directory[group_index];
- entry_t *next = cache->next == NO_INDEX
- ? NULL
- : get_entry(cache, cache->next);
+ cache_level_t *level = get_cache_level(cache, entry);
/* The entry must start at the beginning of the insertion window.
* It must also be the first unused entry in the group.
*/
- assert(entry->offset == cache->current_data);
+ assert(entry->offset == level->current_data);
assert(idx == group_index * GROUP_SIZE + group->used);
- cache->current_data = ALIGN_VALUE(entry->offset + entry->size);
+ level->current_data = ALIGN_VALUE(entry->offset + entry->size);
/* update usage counters
*/
@@ -742,42 +867,12 @@ insert_entry(svn_membuffer_t *cache, ent
/* update entry chain
*/
- entry->next = cache->next;
- if (cache->first == NO_INDEX)
- {
- /* insert as the first entry and only in the chain
- */
- entry->previous = NO_INDEX;
- cache->last = idx;
- cache->first = idx;
- }
- else if (next == NULL)
- {
- /* insert as the last entry in the chain.
- * Note that it cannot also be at the beginning of the chain.
- */
- entry->previous = cache->last;
- get_entry(cache, cache->last)->next = idx;
- cache->last = idx;
- }
- else
- {
- /* insert either at the start of a non-empty list or
- * somewhere in the middle
- */
- entry->previous = next->previous;
- next->previous = idx;
-
- if (entry->previous != NO_INDEX)
- get_entry(cache, entry->previous)->next = idx;
- else
- cache->first = idx;
- }
+ chain_entry(cache, level, entry, idx);
/* The current insertion position must never point outside our
* data buffer.
*/
- assert(cache->current_data <= cache->data_size);
+ assert(level->current_data <= level->start_offset + level->size);
}
/* Map a KEY of 16 bytes to the CACHE and group that shall contain the
@@ -789,9 +884,12 @@ get_group_index(svn_membuffer_t **cache,
{
svn_membuffer_t *segment0 = *cache;
- /* select the cache segment to use. they have all the same group_count */
- *cache = &segment0[key[0] & (segment0->segment_count -1)];
- return key[1] % segment0->group_count;
+ /* select the cache segment to use. They all have the same group_count.
+ * Since KEY may not be well-distributed, pre-fold it to a smaller but
+ * "denser" range. The divisors are primes larger than the largest
+ * counts. */
+ *cache = &segment0[(key[1] % 2809637ull) & (segment0->segment_count - 1)];
+ return (key[0] % 5030895599ull) % segment0->group_count;
}
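
A small standalone sketch (not part of this change; the counts are made up) of the mapping above: the pre-folded key[1] selects the cache segment, the pre-folded key[0] selects the entry group within that segment.

    #include <stdio.h>

    int main(void)
    {
      unsigned long long key[2] = { 0x1234567890abcdefULL, 0xfedcba9876543210ULL };
      unsigned int segment_count = 16;   /* always a power of 2 */
      unsigned int group_count = 1024;

      /* same arithmetic as get_group_index() */
      unsigned int segment = (unsigned int)((key[1] % 2809637ULL) & (segment_count - 1));
      unsigned int group = (unsigned int)((key[0] % 5030895599ULL) % group_count);

      printf("segment %u, group %u\n", segment, group);
      return 0;
    }
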
/* Reduce the hit count of ENTRY and update the accumulated hit info
@@ -912,6 +1010,8 @@ find_entry(svn_membuffer_t *cache,
*/
if (group->used == GROUP_SIZE)
{
+ static int count = 0;
+
/* every entry gets the same chance of being removed.
* Otherwise, we free the first entry, fill it and
* remove it again on the next occasion without considering
@@ -931,6 +1031,7 @@ find_entry(svn_membuffer_t *cache,
let_entry_age(cache, entry);
drop_entry(cache, entry);
+ printf("%d\n", ++count);
}
/* initialize entry for the new key
@@ -950,6 +1051,7 @@ static void
move_entry(svn_membuffer_t *cache, entry_t *entry)
{
apr_size_t size = ALIGN_VALUE(entry->size);
+ cache_level_t *level = get_cache_level(cache, entry);
/* This entry survived this cleansing run. Reset half of its
* hit count so that its removal gets more likely in the next
@@ -963,41 +1065,75 @@ move_entry(svn_membuffer_t *cache, entry
* Size-aligned moves tend to be faster than non-aligned ones
* because no "odd" bytes at the end need to special treatment.
*/
- if (entry->offset != cache->current_data)
+ if (entry->offset != level->current_data)
{
- memmove(cache->data + cache->current_data,
+ memmove(cache->data + level->current_data,
cache->data + entry->offset,
size);
- entry->offset = cache->current_data;
+ entry->offset = level->current_data;
}
/* The insertion position is now directly behind this entry.
*/
- cache->current_data = entry->offset + size;
- cache->next = entry->next;
+ level->current_data = entry->offset + size;
+ level->next = entry->next;
/* The current insertion position must never point outside our
* data buffer.
*/
- assert(cache->current_data <= cache->data_size);
+ assert(level->current_data <= level->start_offset + level->size);
+}
+
+/* Move ENTRY in CACHE from L1 to L2.
+ */
+static void
+promote_entry(svn_membuffer_t *cache, entry_t *entry)
+{
+ apr_uint32_t idx = get_index(cache, entry);
+ apr_size_t size = ALIGN_VALUE(entry->size);
+ assert(get_cache_level(cache, entry) == &cache->l1);
+
+ /* copy item from the current location in L1 to the start of L2's
+ * insertion window */
+ memmove(cache->data + cache->l2.current_data,
+ cache->data + entry->offset,
+ size);
+ entry->offset = cache->l2.current_data;
+
+ /* The insertion position is now directly behind this entry.
+ */
+ cache->l2.current_data += size;
+
+ /* remove ENTRY from chain of L1 entries and put it into L2
+ */
+ unchain_entry(cache, &cache->l1, entry, idx);
+ chain_entry(cache, &cache->l2, entry, idx);
}
-/* If necessary, enlarge the insertion window until it is at least
- * SIZE bytes long. SIZE must not exceed the data buffer size.
- * Return TRUE if enough room could be found or made. A FALSE result
+/* This function implements the cache insertion / eviction strategy for L2.
+ *
+ * If necessary, enlarge the insertion window of CACHE->L2 until it is at
+ * least TO_FIT_IN->SIZE bytes long. TO_FIT_IN->SIZE must not exceed the
+ * data buffer size allocated to CACHE->L2. IDX is the item index of
+ * TO_FIT_IN and is given for performance reasons.
+ *
+ * Return TRUE if enough room could be found or made. A FALSE result
* indicates that the respective item shall not be added.
*/
static svn_boolean_t
-ensure_data_insertable(svn_membuffer_t *cache, apr_size_t size)
+ensure_data_insertable_l2(svn_membuffer_t *cache,
+ entry_t *to_fit_in,
+ apr_uint32_t idx)
{
entry_t *entry;
apr_uint64_t average_hit_value;
apr_uint64_t threshold;
- /* accumulated size of the entries that have been removed to make
- * room for the new one.
- */
- apr_size_t drop_size = 0;
+ /* accumulated "worth" of items dropped so far */
+ apr_size_t drop_hits = 0;
+
+ /* verify parameters */
+ assert(idx == get_index(cache, to_fit_in));
/* This loop will eventually terminate because every cache entry
* would get dropped eventually:
@@ -1015,41 +1151,37 @@ ensure_data_insertable(svn_membuffer_t *
{
/* first offset behind the insertion window
*/
- apr_uint64_t end = cache->next == NO_INDEX
- ? cache->data_size
- : get_entry(cache, cache->next)->offset;
+ apr_uint64_t end = cache->l2.next == NO_INDEX
+ ? cache->l2.start_offset + cache->l2.size
+ : get_entry(cache, cache->l2.next)->offset;
/* leave function as soon as the insertion window is large enough
*/
- if (end >= size + cache->current_data)
+ if (end >= to_fit_in->size + cache->l2.current_data)
return TRUE;
- /* Don't be too eager to cache data. Smaller items will fit into
- * the cache after dropping a single item. Of the larger ones, we
- * will only accept about 50%. They are also likely to get evicted
- * soon due to their notoriously low hit counts.
- *
- * As long as enough similarly or even larger sized entries already
- * exist in the cache, much less insert requests will be rejected.
+ /* If the accumulated worth (in hits) of the items dropped so far
+ * already exceeds that of TO_FIT_IN and the latter still does not
+ * fit in, give up and reject it.
*/
- if (2 * drop_size > size)
+ if (drop_hits > to_fit_in->hit_count)
return FALSE;
/* try to enlarge the insertion window
*/
- if (cache->next == NO_INDEX)
+ if (cache->l2.next == NO_INDEX)
{
/* We reached the end of the data buffer; restart at the beginning.
* Due to the randomized nature of our LFU implementation, very
* large data items may require multiple passes. Therefore, SIZE
* should be restricted to significantly less than data_size.
*/
- cache->current_data = 0;
- cache->next = cache->first;
+ cache->l2.current_data = cache->l2.start_offset;
+ cache->l2.next = cache->l2.first;
}
else
{
- entry = get_entry(cache, cache->next);
+ entry = get_entry(cache, cache->l2.next);
/* Keep entries that are very small. Those are likely to be data
* headers or similar management structures. So, they are probably
@@ -1061,14 +1193,24 @@ ensure_data_insertable(svn_membuffer_t *
{
move_entry(cache, entry);
}
+ else if (cache->l2.next / GROUP_SIZE == idx / GROUP_SIZE)
+ {
+ /* Special case: we cannot drop entries that are in the same
+ * group as TO_FIT_IN because that might cause the latter to
+ * become invalidated if it happens to be the highest used
+ * entry in the group. So, we must keep ENTRY unconditionally.
+ * (this is a very rare condition)
+ */
+ move_entry(cache, entry);
+ }
else
{
svn_boolean_t keep;
if (cache->hit_count > cache->used_entries)
{
- /* Roll the dice and determine a threshold somewhere from 0 up
- * to 2 times the average hit count.
+ /* Roll the dice and determine a threshold somewhere from
+ * 0 up to 2 times the average hit count.
*/
average_hit_value = cache->hit_count / cache->used_entries;
threshold = (average_hit_value+1) * (rand() % 4096) / 2048;
@@ -1077,9 +1219,9 @@ ensure_data_insertable(svn_membuffer_t *
}
else
{
- /* general hit count is low. Keep everything that got hit
- * at all and assign some 50% survival chance to everything
- * else.
+ /* general hit count is low. Keep everything that got
+ * hit at all and assign some 50% survival chance to
+ * everything else.
*/
keep = (entry->hit_count > 0) || (rand() & 1);
}
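
The keep/drop decision above can be exercised in isolation. A self-contained sketch (not part of this change; the function name is illustrative) mirroring the threshold formula and the low-hit-count fallback:

    #include <stdlib.h>

    /* Return non-zero if an entry with ENTRY_HITS read hits should survive,
     * given the cache-wide TOTAL_HITS accumulated over USED_ENTRIES items. */
    int keep_entry(unsigned long long total_hits,
                   unsigned long long used_entries,
                   unsigned long long entry_hits)
    {
      if (total_hits > used_entries)
        {
          /* threshold ranges roughly from 0 up to 2 * (average + 1) */
          unsigned long long average = total_hits / used_entries;
          unsigned long long threshold = (average + 1) * (rand() % 4096) / 2048;
          return entry_hits >= threshold;
        }

      /* low overall hit count: keep anything that was hit at all,
       * give the rest a 50% survival chance */
      return entry_hits > 0 || (rand() & 1);
    }
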
@@ -1087,15 +1229,16 @@ ensure_data_insertable(svn_membuffer_t *
/* keepers or destroyers? */
if (keep)
{
+ /* Keep ENTRY and move the insertion window.
+ */
move_entry(cache, entry);
}
else
{
- /* Drop the entry from the end of the insertion window, if it
- * has been hit less than the threshold. Otherwise, keep it and
- * move the insertion window one entry further.
+ /* Drop the entry from the end of the insertion window,
+ * because it had been hit less than the threshold.
*/
- drop_size += entry->size;
+ drop_hits += entry->hit_count;
drop_entry(cache, entry);
}
}
@@ -1106,6 +1249,70 @@ ensure_data_insertable(svn_membuffer_t *
* right answer. */
}
+/* This function implements the cache insertion / eviction strategy for L1.
+ *
+ * If necessary, enlarge the insertion window of CACHE->L1 by promoting
+ * entries to L2 until it is at least SIZE bytes long.
+ *
+ * Return TRUE if enough room could be found or made. A FALSE result
+ * indicates that the respective item shall not be added because it is
+ * too large.
+ */
+static svn_boolean_t
+ensure_data_insertable_l1(svn_membuffer_t *cache, apr_size_t size)
+{
+ entry_t *entry;
+
+ /* Guarantees that the while loop will terminate. */
+ if (size > cache->l1.size)
+ return FALSE;
+
+ /* This loop will eventually terminate because every cache entry
+ * would get dropped eventually.
+ */
+ while (1)
+ {
+ /* first offset behind the insertion window
+ */
+ apr_uint64_t end = cache->l1.next == NO_INDEX
+ ? cache->l1.start_offset + cache->l1.size
+ : get_entry(cache, cache->l1.next)->offset;
+
+ /* leave function as soon as the insertion window is large enough
+ */
+ if (end >= size + cache->l1.current_data)
+ return TRUE;
+
+ /* Enlarge the insertion window
+ */
+ if (cache->l1.next == NO_INDEX)
+ {
+ /* We reached the end of the data buffer; restart at the beginning.
+ * Due to the randomized nature of our LFU implementation, very
+ * large data items may require multiple passes. Therefore, SIZE
+ * should be restricted to significantly less than data_size.
+ */
+ cache->l1.current_data = cache->l1.start_offset;
+ cache->l1.next = cache->l1.first;
+ }
+ else
+ {
+ /* Remove the entry from the end of insertion window and promote
+ * it to L2, if it is important enough.
+ */
+ entry = get_entry(cache, cache->l1.next);
+
+ if (ensure_data_insertable_l2(cache, entry, cache->l1.next))
+ promote_entry(cache, entry);
+ else
+ drop_entry(cache, entry);
+ }
+ }
+
+ /* This will never be reached. But if it was, "can't insert" was the
+ * right answer. */
+}
+
/* Mimic apr_pcalloc in APR_POOL_DEBUG mode, i.e. handle failed allocations
* (e.g. OOM) properly: Allocate at least SIZE bytes from POOL and zero
* the content of the allocated memory if ZERO has been set. Return NULL
@@ -1225,13 +1432,13 @@ svn_cache__membuffer_cache_create(svn_me
*/
data_size = ALIGN_VALUE(total_size - directory_size + 1) - ITEM_ALIGNMENT;
- /* For cache sizes > 4TB, individual cache segments will be larger
- * than 16GB allowing for >4GB entries. But caching chunks larger
- * than 4GB is simply not supported.
+ /* For cache sizes > 16TB, individual cache segments will be larger
+ * than 32GB allowing for >4GB entries. But caching chunks larger
+ * than 4GB is simply not supported.
*/
- max_entry_size = data_size / 4 > MAX_ITEM_SIZE
+ max_entry_size = data_size / 8 > MAX_ITEM_SIZE
? MAX_ITEM_SIZE
- : data_size / 4;
+ : data_size / 8;
/* to keep the entries small, we use 32 bit indexes only
* -> we need to ensure that no more then 4G entries exist.
@@ -1259,13 +1466,25 @@ svn_cache__membuffer_cache_create(svn_me
hence "unused" */
c[seg].group_initialized = apr_pcalloc(pool, group_init_size);
- c[seg].first = NO_INDEX;
- c[seg].last = NO_INDEX;
- c[seg].next = NO_INDEX;
+ /* Allocate one quarter of the data buffer to L1
+ */
+ c[seg].l1.first = NO_INDEX;
+ c[seg].l1.last = NO_INDEX;
+ c[seg].l1.next = NO_INDEX;
+ c[seg].l1.start_offset = 0;
+ c[seg].l1.size = ALIGN_VALUE(data_size / 4);
+ c[seg].l1.current_data = 0;
+
+ /* The remaining three quarters will be used as L2
+ */
+ c[seg].l2.first = NO_INDEX;
+ c[seg].l2.last = NO_INDEX;
+ c[seg].l2.next = NO_INDEX;
+ c[seg].l2.start_offset = c[seg].l1.size;
+ c[seg].l2.size = data_size - c[seg].l1.size;
+ c[seg].l2.current_data = c[seg].l2.start_offset;
- c[seg].data_size = data_size;
c[seg].data = secure_aligned_alloc(pool, (apr_size_t)data_size, FALSE);
- c[seg].current_data = 0;
c[seg].data_used = 0;
c[seg].max_entry_size = max_entry_size;
@@ -1397,7 +1616,7 @@ membuffer_cache_set_internal(svn_membuff
*/
if ( buffer != NULL
&& cache->max_entry_size >= size
- && ensure_data_insertable(cache, size))
+ && ensure_data_insertable_l1(cache, size))
{
/* Remove old data for this key, if that exists.
* Get an unused entry for the key and and initialize it with
@@ -1405,7 +1624,7 @@ membuffer_cache_set_internal(svn_membuff
*/
entry = find_entry(cache, group_index, to_find, TRUE);
entry->size = size;
- entry->offset = cache->current_data;
+ entry->offset = cache->l1.current_data;
#ifdef SVN_DEBUG_CACHE_MEMBUFFER
@@ -1758,13 +1977,13 @@ membuffer_cache_set_partial_internal(svn
*/
drop_entry(cache, entry);
if ( (cache->max_entry_size >= size)
- && ensure_data_insertable(cache, size))
+ && ensure_data_insertable_l1(cache, size))
{
/* Write the new entry.
*/
entry = find_entry(cache, group_index, to_find, TRUE);
entry->size = size;
- entry->offset = cache->current_data;
+ entry->offset = cache->l1.current_data;
if (size)
memcpy(cache->data + entry->offset, data, size);
@@ -1829,6 +2048,22 @@ membuffer_cache_set_partial(svn_membuffe
* svn_cache__t instance.
*/
+/* Stores the combined key value for the given key. It will be used by
+ * combine_key() to short-circuit expensive hash calculations.
+ */
+typedef struct last_access_key_t
+{
+ /* result of key combining */
+ entry_key_t combined_key;
+
+ /* length of the key (or APR_HASH_KEY_STRING if not used) */
+ apr_size_t key_len;
+
+ /* the original key. Only KEY_LEN bytes are valid. We use uint32 for
+ * better compatibility with pseudo-md5 functions. */
+ apr_uint32_t key[64];
+} last_access_key_t;
+
/* Internal cache structure (used in svn_cache__t.cache_internal) basically
* holding the additional parameters needed to call the respective membuffer
* functions.
@@ -1876,6 +2111,11 @@ typedef struct svn_membuffer_cache_t
*/
int alloc_counter;
+ /* cache for the last key used.
+ * Will be NULL for caches with short, fixed-size keys.
+ */
+ last_access_key_t *last_access;
+
/* if enabled, this will serialize the access to this instance.
*/
svn_mutex__t *mutex;
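
The last_access member added above acts as a single-entry memo: if the same long key is combined twice in a row, the expensive (pseudo-)MD5 step is skipped and the previously combined key is reused. A rough standalone sketch of that idea (not part of this change; types and sizes are illustrative):

    #include <string.h>

    typedef struct memo_t
    {
      unsigned char key[64];          /* copy of the last key seen */
      size_t key_len;                 /* number of valid bytes in KEY */
      unsigned long long combined[2]; /* combined key computed for it */
    } memo_t;

    /* If KEY/KEY_LEN matches the memo, copy the remembered combined key
     * into RESULT and return 1; otherwise return 0 and let the caller run
     * the full hash before refreshing the memo. */
    int memo_lookup(const memo_t *memo, const void *key, size_t key_len,
                    unsigned long long result[2])
    {
      if (key_len == memo->key_len
          && key_len <= sizeof(memo->key)
          && memcmp(key, memo->key, key_len) == 0)
        {
          result[0] = memo->combined[0];
          result[1] = memo->combined[1];
          return 1;
        }
      return 0;
    }
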
@@ -1893,46 +2133,127 @@ typedef struct svn_membuffer_cache_t
*/
#define ALLOCATIONS_PER_POOL_CLEAR 10
-
/* Basically calculate a hash value for KEY of length KEY_LEN, combine it
* with the CACHE->PREFIX and write the result in CACHE->COMBINED_KEY.
+ * This could replace combine_key() entirely but we actually use it only
+ * when the quick path failed.
*/
static void
-combine_key(svn_membuffer_cache_t *cache,
- const void *key,
- apr_ssize_t key_len)
+combine_long_key(svn_membuffer_cache_t *cache,
+ const void *key,
+ apr_ssize_t key_len)
{
+ assert(cache->last_access);
+
+ /* handle variable-length keys */
if (key_len == APR_HASH_KEY_STRING)
key_len = strlen((const char *) key);
- if (key_len < 16)
+ /* same key as the last time? -> short-circuit */
+ if ( key_len == cache->last_access->key_len
+ && memcmp(key, cache->last_access->key, key_len) == 0)
{
- apr_uint32_t data[4] = { 0 };
- memcpy(data, key, key_len);
+ memcpy(cache->combined_key, cache->last_access->combined_key,
+ sizeof(cache->combined_key));
+ }
+ else if (key_len >= 64)
+ {
+ /* relatively long key. Use the generic, slow hash code for it */
+ apr_md5((unsigned char*)cache->combined_key, key, key_len);
+ cache->combined_key[0] ^= cache->prefix[0];
+ cache->combined_key[1] ^= cache->prefix[1];
- svn__pseudo_md5_15((apr_uint32_t *)cache->combined_key, data);
+ /* is the key short enough to cache the result? */
+ if (key_len <= sizeof(cache->last_access->key))
+ {
+ memcpy(cache->last_access->combined_key, cache->combined_key,
+ sizeof(cache->combined_key));
+ cache->last_access->key_len = key_len;
+ memcpy(cache->last_access->key, key, key_len);
+ }
}
- else if (key_len < 32)
+ else
{
- apr_uint32_t data[8] = { 0 };
- memcpy(data, key, key_len);
+ /* shorter keys use efficient hash code and *do* cache the results */
+ cache->last_access->key_len = key_len;
+ if (key_len < 16)
+ {
+ memset(cache->last_access->key, 0, 16);
+ memcpy(cache->last_access->key, key, key_len);
- svn__pseudo_md5_31((apr_uint32_t *)cache->combined_key, data);
+ svn__pseudo_md5_15((apr_uint32_t *)cache->combined_key,
+ cache->last_access->key);
+ }
+ else if (key_len < 32)
+ {
+ memset(cache->last_access->key, 0, 32);
+ memcpy(cache->last_access->key, key, key_len);
+
+ svn__pseudo_md5_31((apr_uint32_t *)cache->combined_key,
+ cache->last_access->key);
+ }
+ else
+ {
+ memset(cache->last_access->key, 0, 64);
+ memcpy(cache->last_access->key, key, key_len);
+
+ svn__pseudo_md5_63((apr_uint32_t *)cache->combined_key,
+ cache->last_access->key);
+ }
+
+ cache->combined_key[0] ^= cache->prefix[0];
+ cache->combined_key[1] ^= cache->prefix[1];
+
+ memcpy(cache->last_access->combined_key, cache->combined_key,
+ sizeof(cache->combined_key));
}
- else if (key_len < 64)
+}
+
+/* Basically calculate a hash value for KEY of length KEY_LEN, combine it
+ * with the CACHE->PREFIX and write the result in CACHE->COMBINED_KEY.
+ */
+static void
+combine_key(svn_membuffer_cache_t *cache,
+ const void *key,
+ apr_ssize_t key_len)
+{
+ /* copy of *key, padded with 0 */
+ apr_uint64_t data[2];
+
+ /* short, fixed-size keys are the most common case */
+ if (key_len == 16)
+ {
+ data[0] = ((const apr_uint64_t *)key)[0];
+ data[1] = ((const apr_uint64_t *)key)[1];
+ }
+ else if (key_len == 8)
+ {
+ data[0] = ((const apr_uint64_t *)key)[0];
+ data[1] = 0;
+ }
+ else if (key_len != APR_HASH_KEY_STRING && key_len < 16)
{
- apr_uint32_t data[16] = { 0 };
+ data[0] = 0;
+ data[1] = 0;
memcpy(data, key, key_len);
-
- svn__pseudo_md5_63((apr_uint32_t *)cache->combined_key, data);
}
else
{
- apr_md5((unsigned char*)cache->combined_key, key, key_len);
+ /* longer or variably sized keys */
+ combine_long_key(cache, key, key_len);
+ return;
}
- cache->combined_key[0] ^= cache->prefix[0];
- cache->combined_key[1] ^= cache->prefix[1];
+ /* scramble key DATA. All of this must be reversible to prevent key
+ * collisions. So, we limit ourselves to xor and permutations. */
+ data[1] = (data[1] << 27) | (data[1] >> 37);
+ data[1] ^= data[0] & 0xffff;
+ data[0] ^= data[1] & 0xffffffffffff0000ull;
+ data[0] = (data[0] << 43) | (data[0] >> 21);
+
+ /* combine with this cache's namespace */
+ cache->combined_key[0] = data[0] ^ cache->prefix[0];
+ cache->combined_key[1] = data[1] ^ cache->prefix[1];
}
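
The comment in combine_key() above requires the scramble to stay reversible so that distinct 16-byte keys cannot collide before the prefix is mixed in. A small sketch (not part of this change) showing that the rotate/xor sequence is indeed a bijection; the inverse exists only for this demonstration and is never needed by the cache:

    #include <assert.h>

    static void scramble(unsigned long long data[2])
    {
      /* same sequence as combine_key() */
      data[1] = (data[1] << 27) | (data[1] >> 37);
      data[1] ^= data[0] & 0xffff;
      data[0] ^= data[1] & 0xffffffffffff0000ULL;
      data[0] = (data[0] << 43) | (data[0] >> 21);
    }

    static void unscramble(unsigned long long data[2])
    {
      /* undo the steps in reverse order */
      data[0] = (data[0] >> 43) | (data[0] << 21);
      data[0] ^= data[1] & 0xffffffffffff0000ULL;
      data[1] ^= data[0] & 0xffff;
      data[1] = (data[1] >> 27) | (data[1] << 37);
    }

    int main(void)
    {
      unsigned long long d[2] = { 0x0123456789abcdefULL, 0xf00dcafebeefd00dULL };
      scramble(d);
      unscramble(d);
      assert(d[0] == 0x0123456789abcdefULL);
      assert(d[1] == 0xf00dcafebeefd00dULL);
      return 0;
    }
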
/* Implement svn_cache__vtable_t.get (not thread-safe)
@@ -2112,9 +2433,9 @@ static svn_error_t *
svn_membuffer_get_segment_info(svn_membuffer_t *segment,
svn_cache__info_t *info)
{
- info->data_size += segment->data_size;
+ info->data_size += segment->l1.size + segment->l2.size;
info->used_size += segment->data_used;
- info->total_size += segment->data_size +
+ info->total_size += segment->l1.size + segment->l2.size +
segment->group_count * GROUP_SIZE * sizeof(entry_t);
info->used_entries += segment->used_entries;
@@ -2347,6 +2668,18 @@ svn_cache__create_membuffer_cache(svn_ca
pool));
memcpy(cache->prefix, checksum->digest, sizeof(cache->prefix));
+ /* fixed-length keys of 16 bytes or under don't need a buffer because we
+ * can use a very fast key combining algorithm. */
+ if ((klen == APR_HASH_KEY_STRING) || klen > sizeof(entry_key_t))
+ {
+ cache->last_access = apr_pcalloc(pool, sizeof(*cache->last_access));
+ cache->last_access->key_len = APR_HASH_KEY_STRING;
+ }
+ else
+ {
+ cache->last_access = NULL;
+ }
+
#ifdef SVN_DEBUG_CACHE_MEMBUFFER
/* Initialize cache debugging support.
@@ -2362,6 +2695,7 @@ svn_cache__create_membuffer_cache(svn_ca
wrapper->cache_internal = cache;
wrapper->error_handler = 0;
wrapper->error_baton = 0;
+ wrapper->pretend_empty = !!getenv("SVN_X_DOES_NOT_MARK_THE_SPOT");
*cache_p = wrapper;
return SVN_NO_ERROR;
Modified: subversion/branches/wc-collate-path/subversion/libsvn_subr/cache-memcache.c
URL: http://svn.apache.org/viewvc/subversion/branches/wc-collate-path/subversion/libsvn_subr/cache-memcache.c?rev=1479901&r1=1479900&r2=1479901&view=diff
==============================================================================
--- subversion/branches/wc-collate-path/subversion/libsvn_subr/cache-memcache.c (original)
+++ subversion/branches/wc-collate-path/subversion/libsvn_subr/cache-memcache.c Tue May 7 13:26:25 2013
@@ -407,6 +407,7 @@ svn_cache__create_memcache(svn_cache__t
wrapper->cache_internal = cache;
wrapper->error_handler = 0;
wrapper->error_baton = 0;
+ wrapper->pretend_empty = !!getenv("SVN_X_DOES_NOT_MARK_THE_SPOT");
*cache_p = wrapper;
return SVN_NO_ERROR;
Modified: subversion/branches/wc-collate-path/subversion/libsvn_subr/cache.c
URL: http://svn.apache.org/viewvc/subversion/branches/wc-collate-path/subversion/libsvn_subr/cache.c?rev=1479901&r1=1479900&r2=1479901&view=diff
==============================================================================
--- subversion/branches/wc-collate-path/subversion/libsvn_subr/cache.c (original)
+++ subversion/branches/wc-collate-path/subversion/libsvn_subr/cache.c Tue May 7 13:26:25 2013
@@ -76,7 +76,7 @@ svn_cache__get(void **value_p,
out with FOUND set to false. */
*found = FALSE;
#ifdef SVN_DEBUG
- if (getenv("SVN_X_DOES_NOT_MARK_THE_SPOT"))
+ if (cache->pretend_empty)
return SVN_NO_ERROR;
#endif
@@ -119,7 +119,7 @@ svn_cache__iter(svn_boolean_t *completed
apr_pool_t *scratch_pool)
{
#ifdef SVN_DEBUG
- if (getenv("SVN_X_DOES_NOT_MARK_THE_SPOT"))
+ if (cache->pretend_empty)
/* Pretend CACHE is empty. */
return SVN_NO_ERROR;
#endif
@@ -146,7 +146,7 @@ svn_cache__get_partial(void **value,
out with FOUND set to false. */
*found = FALSE;
#ifdef SVN_DEBUG
- if (getenv("SVN_X_DOES_NOT_MARK_THE_SPOT"))
+ if (cache->pretend_empty)
return SVN_NO_ERROR;
#endif
Modified: subversion/branches/wc-collate-path/subversion/libsvn_subr/cache.h
URL: http://svn.apache.org/viewvc/subversion/branches/wc-collate-path/subversion/libsvn_subr/cache.h?rev=1479901&r1=1479900&r2=1479901&view=diff
==============================================================================
--- subversion/branches/wc-collate-path/subversion/libsvn_subr/cache.h (original)
+++ subversion/branches/wc-collate-path/subversion/libsvn_subr/cache.h Tue May 7 13:26:25 2013
@@ -99,6 +99,10 @@ struct svn_cache__t {
/* Total number of function calls that returned an error. */
apr_uint64_t failures;
+
+ /* Cause all getters to act as though the cache contains no data.
+ (Currently this never becomes set except in maintainer builds.) */
+ svn_boolean_t pretend_empty;
};
Modified: subversion/branches/wc-collate-path/subversion/libsvn_subr/config.c
URL: http://svn.apache.org/viewvc/subversion/branches/wc-collate-path/subversion/libsvn_subr/config.c?rev=1479901&r1=1479900&r2=1479901&view=diff
==============================================================================
--- subversion/branches/wc-collate-path/subversion/libsvn_subr/config.c (original)
+++ subversion/branches/wc-collate-path/subversion/libsvn_subr/config.c Tue May 7 13:26:25 2013
@@ -77,9 +77,10 @@ struct cfg_option_t
svn_error_t *
-svn_config_create(svn_config_t **cfgp,
- svn_boolean_t section_names_case_sensitive,
- apr_pool_t *result_pool)
+svn_config_create2(svn_config_t **cfgp,
+ svn_boolean_t section_names_case_sensitive,
+ svn_boolean_t option_names_case_sensitive,
+ apr_pool_t *result_pool)
{
svn_config_t *cfg = apr_palloc(result_pool, sizeof(*cfg));
@@ -90,21 +91,26 @@ svn_config_create(svn_config_t **cfgp,
cfg->tmp_key = svn_stringbuf_create_empty(result_pool);
cfg->tmp_value = svn_stringbuf_create_empty(result_pool);
cfg->section_names_case_sensitive = section_names_case_sensitive;
+ cfg->option_names_case_sensitive = option_names_case_sensitive;
*cfgp = cfg;
return SVN_NO_ERROR;
}
svn_error_t *
-svn_config_read2(svn_config_t **cfgp, const char *file,
+svn_config_read3(svn_config_t **cfgp, const char *file,
svn_boolean_t must_exist,
svn_boolean_t section_names_case_sensitive,
- apr_pool_t *pool)
+ svn_boolean_t option_names_case_sensitive,
+ apr_pool_t *result_pool)
{
svn_config_t *cfg;
svn_error_t *err;
- SVN_ERR(svn_config_create(&cfg, section_names_case_sensitive, pool));
+ SVN_ERR(svn_config_create2(&cfg,
+ section_names_case_sensitive,
+ option_names_case_sensitive,
+ result_pool));
/* Yes, this is platform-specific code in Subversion, but there's no
practical way to migrate it into APR, as it's simultaneously
@@ -114,10 +120,10 @@ svn_config_read2(svn_config_t **cfgp, co
#ifdef WIN32
if (0 == strncmp(file, SVN_REGISTRY_PREFIX, SVN_REGISTRY_PREFIX_LEN))
err = svn_config__parse_registry(cfg, file + SVN_REGISTRY_PREFIX_LEN,
- must_exist, pool);
+ must_exist, result_pool);
else
#endif /* WIN32 */
- err = svn_config__parse_file(cfg, file, must_exist, pool);
+ err = svn_config__parse_file(cfg, file, must_exist, result_pool);
if (err != SVN_NO_ERROR)
return err;
@@ -130,13 +136,17 @@ svn_config_read2(svn_config_t **cfgp, co
svn_error_t *
svn_config_parse(svn_config_t **cfgp, svn_stream_t *stream,
svn_boolean_t section_names_case_sensitive,
+ svn_boolean_t option_names_case_sensitive,
apr_pool_t *result_pool)
{
svn_config_t *cfg;
svn_error_t *err;
apr_pool_t *scratch_pool = svn_pool_create(result_pool);
- err = svn_config_create(&cfg, section_names_case_sensitive, result_pool);
+ err = svn_config_create2(&cfg,
+ section_names_case_sensitive,
+ option_names_case_sensitive,
+ result_pool);
if (err == SVN_NO_ERROR)
err = svn_config__parse_stream(cfg, stream, result_pool, scratch_pool);
@@ -178,7 +188,8 @@ read_all(svn_config_t **cfgp,
#ifdef WIN32
if (sys_registry_path)
{
- SVN_ERR(svn_config_read2(cfgp, sys_registry_path, FALSE, FALSE, pool));
+ SVN_ERR(svn_config_read3(cfgp, sys_registry_path, FALSE, FALSE, FALSE,
+ pool));
red_config = TRUE;
}
#endif /* WIN32 */
@@ -189,7 +200,8 @@ read_all(svn_config_t **cfgp,
SVN_ERR(svn_config_merge(*cfgp, sys_file_path, FALSE));
else
{
- SVN_ERR(svn_config_read2(cfgp, sys_file_path, FALSE, FALSE, pool));
+ SVN_ERR(svn_config_read3(cfgp, sys_file_path,
+ FALSE, FALSE, FALSE, pool));
red_config = TRUE;
}
}
@@ -203,8 +215,8 @@ read_all(svn_config_t **cfgp,
SVN_ERR(svn_config_merge(*cfgp, usr_registry_path, FALSE));
else
{
- SVN_ERR(svn_config_read2(cfgp, usr_registry_path,
- FALSE, FALSE, pool));
+ SVN_ERR(svn_config_read3(cfgp, usr_registry_path,
+ FALSE, FALSE, FALSE, pool));
red_config = TRUE;
}
}
@@ -216,13 +228,14 @@ read_all(svn_config_t **cfgp,
SVN_ERR(svn_config_merge(*cfgp, usr_file_path, FALSE));
else
{
- SVN_ERR(svn_config_read2(cfgp, usr_file_path, FALSE, FALSE, pool));
+ SVN_ERR(svn_config_read3(cfgp, usr_file_path,
+ FALSE, FALSE, FALSE, pool));
red_config = TRUE;
}
}
if (! red_config)
- SVN_ERR(svn_config_create(cfgp, FALSE, pool));
+ SVN_ERR(svn_config_create2(cfgp, FALSE, FALSE, pool));
return SVN_NO_ERROR;
}
@@ -352,7 +365,10 @@ svn_config_merge(svn_config_t *cfg, cons
### We could use a tmp subpool for this, since merge_cfg is going
to be tossed afterwards. Premature optimization, though? */
svn_config_t *merge_cfg;
- SVN_ERR(svn_config_read2(&merge_cfg, file, must_exist, FALSE, cfg->pool));
+ SVN_ERR(svn_config_read3(&merge_cfg, file, must_exist,
+ cfg->section_names_case_sensitive,
+ cfg->option_names_case_sensitive,
+ cfg->pool));
/* Now copy the new options into the original table. */
for_each_option(merge_cfg, cfg, merge_cfg->pool, merge_callback);
@@ -427,7 +443,8 @@ find_option(svn_config_t *cfg, const cha
/* Canonicalize the option key */
svn_stringbuf_set(cfg->tmp_key, option);
- make_hash_key(cfg->tmp_key->data);
+ if (! cfg->option_names_case_sensitive)
+ make_hash_key(cfg->tmp_key->data);
opt = apr_hash_get(sec->options, cfg->tmp_key->data,
cfg->tmp_key->len);
@@ -606,13 +623,17 @@ static void
svn_config_create_option(cfg_option_t **opt,
const char *option,
const char *value,
+ svn_boolean_t option_names_case_sensitive,
apr_pool_t *pool)
{
cfg_option_t *o;
o = apr_palloc(pool, sizeof(cfg_option_t));
o->name = apr_pstrdup(pool, option);
- o->hash_key = make_hash_key(apr_pstrdup(pool, option));
+ if (option_names_case_sensitive)
+ o->hash_key = o->name;
+ else
+ o->hash_key = make_hash_key(apr_pstrdup(pool, option));
o->value = apr_pstrdup(pool, value);
o->x_value = NULL;
@@ -677,7 +698,9 @@ svn_config_set(svn_config_t *cfg,
}
/* Create a new option */
- svn_config_create_option(&opt, option, value, cfg->pool);
+ svn_config_create_option(&opt, option, value,
+ cfg->option_names_case_sensitive,
+ cfg->pool);
if (sec == NULL)
{
@@ -1043,10 +1066,11 @@ svn_config_dup(svn_config_t **cfgp,
apr_hash_index_t *optidx;
*cfgp = 0;
- SVN_ERR(svn_config_create(cfgp, FALSE, pool));
+ SVN_ERR(svn_config_create2(cfgp, FALSE, FALSE, pool));
(*cfgp)->x_values = src->x_values;
(*cfgp)->section_names_case_sensitive = src->section_names_case_sensitive;
+ (*cfgp)->option_names_case_sensitive = src->option_names_case_sensitive;
for (sectidx = apr_hash_first(pool, src->sections);
sectidx != NULL;
@@ -1076,7 +1100,9 @@ svn_config_dup(svn_config_t **cfgp,
apr_hash_this(optidx, &optkey, &optkeyLength, &optval);
srcopt = optval;
- svn_config_create_option(&destopt, srcopt->name, srcopt->value, pool);
+ svn_config_create_option(&destopt, srcopt->name, srcopt->value,
+ (*cfgp)->option_names_case_sensitive,
+ pool);
destopt->value = apr_pstrdup(pool, srcopt->value);
destopt->x_value = apr_pstrdup(pool, srcopt->x_value);
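
A hypothetical caller of the new svn_config_read3() above, keeping section names case-insensitive as before while comparing option names case-sensitively (the helper name and path are made up):

    #include "svn_config.h"

    static svn_error_t *
    read_with_case_sensitive_options(svn_config_t **cfg, apr_pool_t *pool)
    {
      return svn_config_read3(cfg, "/path/to/config",
                              TRUE,   /* must_exist */
                              FALSE,  /* section_names_case_sensitive */
                              TRUE,   /* option_names_case_sensitive */
                              pool);
    }
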
Modified: subversion/branches/wc-collate-path/subversion/libsvn_subr/config_file.c
URL: http://svn.apache.org/viewvc/subversion/branches/wc-collate-path/subversion/libsvn_subr/config_file.c?rev=1479901&r1=1479900&r2=1479901&view=diff
==============================================================================
--- subversion/branches/wc-collate-path/subversion/libsvn_subr/config_file.c (original)
+++ subversion/branches/wc-collate-path/subversion/libsvn_subr/config_file.c Tue May 7 13:26:25 2013
@@ -1152,6 +1152,16 @@ svn_config_ensure(const char *config_dir
"### ra_local (the file:// scheme). The value represents the number" NL
"### of MB used by the cache." NL
"# memory-cache-size = 16" NL
+ "### Set diff-ignore-content-type to 'yes' to cause 'svn diff' to" NL
+ "### attempt to show differences of all modified files regardless" NL
+ "### of their MIME content type. By default, Subversion will only" NL
+ "### attempt to show differences for files believed to have human-" NL
+ "### readable (non-binary) content. This option is especially" NL
+ "### useful when Subversion is configured (via the 'diff-cmd'" NL
+ "### option) to employ an external differencing tool which is able" NL
+ "### to show meaningful differences for binary file formats. [New" NL
+ "### in 1.9]" NL
+ "# diff-ignore-content-type = no" NL
"" NL
"### Section for configuring automatic properties." NL
"[auto-props]" NL
Modified: subversion/branches/wc-collate-path/subversion/libsvn_subr/config_impl.h
URL: http://svn.apache.org/viewvc/subversion/branches/wc-collate-path/subversion/libsvn_subr/config_impl.h?rev=1479901&r1=1479900&r2=1479901&view=diff
==============================================================================
--- subversion/branches/wc-collate-path/subversion/libsvn_subr/config_impl.h (original)
+++ subversion/branches/wc-collate-path/subversion/libsvn_subr/config_impl.h Tue May 7 13:26:25 2013
@@ -67,6 +67,9 @@ struct svn_config_t
/* Specifies whether section names are populated case sensitively. */
svn_boolean_t section_names_case_sensitive;
+
+ /* Specifies whether option names are populated case sensitively. */
+ svn_boolean_t option_names_case_sensitive;
};
Modified: subversion/branches/wc-collate-path/subversion/libsvn_subr/deprecated.c
URL: http://svn.apache.org/viewvc/subversion/branches/wc-collate-path/subversion/libsvn_subr/deprecated.c?rev=1479901&r1=1479900&r2=1479901&view=diff
==============================================================================
--- subversion/branches/wc-collate-path/subversion/libsvn_subr/deprecated.c (original)
+++ subversion/branches/wc-collate-path/subversion/libsvn_subr/deprecated.c Tue May 7 13:26:25 2013
@@ -1176,16 +1176,39 @@ svn_mergeinfo_intersect(svn_mergeinfo_t
}
/*** From config.c ***/
+svn_error_t *
+svn_config_create(svn_config_t **cfgp,
+ svn_boolean_t section_names_case_sensitive,
+ apr_pool_t *result_pool)
+{
+ return svn_error_trace(svn_config_create2(cfgp,
+ section_names_case_sensitive,
+ FALSE,
+ result_pool));
+}
+
+svn_error_t *
+svn_config_read2(svn_config_t **cfgp, const char *file,
+ svn_boolean_t must_exist,
+ svn_boolean_t section_names_case_sensitive,
+ apr_pool_t *result_pool)
+{
+ return svn_error_trace(svn_config_read3(cfgp, file,
+ must_exist,
+ section_names_case_sensitive,
+ FALSE,
+ result_pool));
+}
svn_error_t *
svn_config_read(svn_config_t **cfgp, const char *file,
svn_boolean_t must_exist,
- apr_pool_t *pool)
+ apr_pool_t *result_pool)
{
- return svn_error_trace(svn_config_read2(cfgp, file,
+ return svn_error_trace(svn_config_read3(cfgp, file,
must_exist,
- FALSE,
- pool));
+ FALSE, FALSE,
+ result_pool));
}
#ifdef SVN_DISABLE_FULL_VERSION_MATCH
Modified: subversion/branches/wc-collate-path/subversion/libsvn_subr/error.c
URL: http://svn.apache.org/viewvc/subversion/branches/wc-collate-path/subversion/libsvn_subr/error.c?rev=1479901&r1=1479900&r2=1479901&view=diff
==============================================================================
--- subversion/branches/wc-collate-path/subversion/libsvn_subr/error.c (original)
+++ subversion/branches/wc-collate-path/subversion/libsvn_subr/error.c Tue May 7 13:26:25 2013
@@ -679,7 +679,7 @@ svn_strerror(apr_status_t statcode, char
}
#ifdef SVN_DEBUG
-/* Defines svn__errno */
+/* Defines svn__errno and svn__apr_errno */
#include "errorcode.inc"
#endif
@@ -705,6 +705,12 @@ svn_error_symbolic_name(apr_status_t sta
for (i = 0; i < sizeof(svn__errno) / sizeof(svn__errno[0]); i++)
if (svn__errno[i].errcode == (int)statcode)
return svn__errno[i].errname;
+
+ /* Try APR errors. */
+ /* Linear search through a sorted array */
+ for (i = 0; i < sizeof(svn__apr_errno) / sizeof(svn__apr_errno[0]); i++)
+ if (svn__apr_errno[i].errcode == (int)statcode)
+ return svn__apr_errno[i].errname;
#endif /* SVN_DEBUG */
/* ### TODO: do we need APR_* error macros? What about APR_TO_OS_ERROR()? */
Modified: subversion/branches/wc-collate-path/subversion/libsvn_subr/stream.c
URL: http://svn.apache.org/viewvc/subversion/branches/wc-collate-path/subversion/libsvn_subr/stream.c?rev=1479901&r1=1479900&r2=1479901&view=diff
==============================================================================
--- subversion/branches/wc-collate-path/subversion/libsvn_subr/stream.c (original)
+++ subversion/branches/wc-collate-path/subversion/libsvn_subr/stream.c Tue May 7 13:26:25 2013
@@ -1676,6 +1676,9 @@ typedef struct lazyopen_baton_t {
svn_stream_t *real_stream;
apr_pool_t *pool;
+ /* Whether to open the wrapped stream on a close call. */
+ svn_boolean_t open_on_close;
+
} lazyopen_baton_t;
@@ -1747,7 +1750,9 @@ close_handler_lazyopen(void *baton)
{
lazyopen_baton_t *b = baton;
- if (b->real_stream != NULL)
+ if (b->open_on_close)
+ SVN_ERR(lazyopen_if_unopened(b));
+ if (b->real_stream)
SVN_ERR(svn_stream_close(b->real_stream));
return SVN_NO_ERROR;
@@ -1796,6 +1801,7 @@ is_buffered_lazyopen(void *baton)
svn_stream_t *
svn_stream_lazyopen_create(svn_stream_lazyopen_func_t open_func,
void *open_baton,
+ svn_boolean_t open_on_close,
apr_pool_t *result_pool)
{
lazyopen_baton_t *lob = apr_pcalloc(result_pool, sizeof(*lob));
@@ -1805,6 +1811,7 @@ svn_stream_lazyopen_create(svn_stream_la
lob->open_baton = open_baton;
lob->real_stream = NULL;
lob->pool = result_pool;
+ lob->open_on_close = open_on_close;
stream = svn_stream_create(lob, result_pool);
svn_stream_set_read(stream, read_handler_lazyopen);
Modified: subversion/branches/wc-collate-path/subversion/libsvn_subr/string.c
URL: http://svn.apache.org/viewvc/subversion/branches/wc-collate-path/subversion/libsvn_subr/string.c?rev=1479901&r1=1479900&r2=1479901&view=diff
==============================================================================
--- subversion/branches/wc-collate-path/subversion/libsvn_subr/string.c (original)
+++ subversion/branches/wc-collate-path/subversion/libsvn_subr/string.c Tue May 7 13:26:25 2013
@@ -1242,7 +1242,7 @@ svn_string__similarity(const svn_string_
/* Calculate LCS length of the remainder */
for (pstr = stra; pstr < enda; ++pstr)
{
- int i;
+ apr_size_t i;
for (i = 1; i <= slots; ++i)
{
if (*pstr == strb[i-1])
Modified: subversion/branches/wc-collate-path/subversion/libsvn_subr/subst.c
URL: http://svn.apache.org/viewvc/subversion/branches/wc-collate-path/subversion/libsvn_subr/subst.c?rev=1479901&r1=1479900&r2=1479901&view=diff
==============================================================================
--- subversion/branches/wc-collate-path/subversion/libsvn_subr/subst.c (original)
+++ subversion/branches/wc-collate-path/subversion/libsvn_subr/subst.c Tue May 7 13:26:25 2013
@@ -1701,7 +1701,7 @@ create_special_file_from_stream(svn_stre
svn_io_file_del_none, pool));
/* Do the atomic rename from our temporary location. */
- return svn_io_file_rename(dst_tmp, dst, pool);
+ return svn_error_trace(svn_io_file_rename(dst_tmp, dst, pool));
}
@@ -1749,8 +1749,9 @@ svn_subst_copy_and_translate4(const char
SVN_ERR(svn_stream_open_readonly(&src_stream, src, pool, pool));
}
- return svn_error_trace(create_special_file_from_stream(src_stream,
- dst, pool));
+ SVN_ERR(create_special_file_from_stream(src_stream, dst, pool));
+
+ return svn_error_trace(svn_stream_close(src_stream));
}
/* else !expand */
Modified: subversion/branches/wc-collate-path/subversion/libsvn_wc/adm_ops.c
URL: http://svn.apache.org/viewvc/subversion/branches/wc-collate-path/subversion/libsvn_wc/adm_ops.c?rev=1479901&r1=1479900&r2=1479901&view=diff
==============================================================================
--- subversion/branches/wc-collate-path/subversion/libsvn_wc/adm_ops.c (original)
+++ subversion/branches/wc-collate-path/subversion/libsvn_wc/adm_ops.c Tue May 7 13:26:25 2013
@@ -1155,7 +1155,7 @@ svn_wc__get_pristine_contents_by_checksu
gpl_baton->checksum = checksum;
*contents = svn_stream_lazyopen_create(get_pristine_lazyopen_func,
- gpl_baton, result_pool);
+ gpl_baton, FALSE, result_pool);
}
return SVN_NO_ERROR;
Modified: subversion/branches/wc-collate-path/subversion/libsvn_wc/conflicts.c
URL: http://svn.apache.org/viewvc/subversion/branches/wc-collate-path/subversion/libsvn_wc/conflicts.c?rev=1479901&r1=1479900&r2=1479901&view=diff
==============================================================================
--- subversion/branches/wc-collate-path/subversion/libsvn_wc/conflicts.c (original)
+++ subversion/branches/wc-collate-path/subversion/libsvn_wc/conflicts.c Tue May 7 13:26:25 2013
@@ -1716,42 +1716,33 @@ save_merge_result(svn_skel_t **work_item
/* Call the conflict resolver callback for a text conflict, and resolve
- * the conflict if it tells us to do so.
+ * the conflict if the callback tells us to do so. (Do not mark the
+ * conflict as resolved.)
*
- * Assume that there is a text conflict on the path DB/LOCAL_ABSPATH.
+ * Assume that there is a text conflict on the path DB/LOCAL_ABSPATH,
+ * and CDESC is the conflict description.
*
* Call CONFLICT_FUNC with CONFLICT_BATON to find out whether and how
- * it wants to resolve the conflict. Pass it a conflict description
- * containing OPERATION, LEFT/RIGHT_ABSPATH, LEFT/RIGHT_VERSION,
- * RESULT_TARGET and DETRANSLATED_TARGET.
+ * it wants to resolve the conflict.
*
* If the callback returns a resolution other than 'postpone', then
* perform that requested resolution and prepare to mark the conflict
- * as resolved.
+ * as resolved ... ?? by adding work items to *WORK_ITEMS ??.
+ *
+ * MERGE_OPTIONS is used if the resolver callback requests a merge.
*
* Return *WORK_ITEMS that will do the on-disk work required to complete
* the resolution (but not to mark the conflict as resolved), and set
* *WAS_RESOLVED to true, if it was resolved. Set *WORK_ITEMS to NULL
* and *WAS_RESOLVED to FALSE otherwise.
- *
- * RESULT_TARGET is the path to the merged file produced by the internal
- * or external 3-way merge, which may contain conflict markers, in
- * repository normal form. DETRANSLATED_TARGET is the 'mine' version of
- * the file, also in RNF.
*/
static svn_error_t *
resolve_text_conflict(svn_skel_t **work_items,
svn_boolean_t *was_resolved,
svn_wc__db_t *db,
const char *local_abspath,
+ svn_wc_conflict_description2_t *cdesc,
const apr_array_header_t *merge_options,
- svn_wc_operation_t operation,
- const char *left_abspath,
- const char *right_abspath,
- const svn_wc_conflict_version_t *left_version,
- const svn_wc_conflict_version_t *right_version,
- const char *result_target,
- const char *detranslated_target,
svn_wc_conflict_resolver_func2_t conflict_func,
void *conflict_baton,
apr_pool_t *result_pool,
@@ -1759,8 +1750,6 @@ resolve_text_conflict(svn_skel_t **work_
{
svn_wc_conflict_result_t *result;
svn_skel_t *work_item;
- svn_wc_conflict_description2_t *cdesc;
- apr_hash_t *props;
*work_items = NULL;
*was_resolved = FALSE;
@@ -1768,21 +1757,6 @@ resolve_text_conflict(svn_skel_t **work_
/* Give the conflict resolution callback a chance to clean
up the conflicts before we mark the file 'conflicted' */
- SVN_ERR(svn_wc__db_read_props(&props, db, local_abspath,
- scratch_pool, scratch_pool));
-
- cdesc = svn_wc_conflict_description_create_text2(local_abspath,
- scratch_pool);
- cdesc->is_binary = FALSE;
- cdesc->mime_type = svn_prop_get_value(props, SVN_PROP_MIME_TYPE);
- cdesc->base_abspath = left_abspath;
- cdesc->their_abspath = right_abspath;
- cdesc->my_abspath = detranslated_target;
- cdesc->merged_file = result_target;
- cdesc->operation = operation;
- cdesc->src_left_version = left_version;
- cdesc->src_right_version = right_version;
-
SVN_ERR(conflict_func(&result, cdesc, conflict_baton, scratch_pool,
scratch_pool));
if (result == NULL)
@@ -1798,7 +1772,7 @@ resolve_text_conflict(svn_skel_t **work_
merged-file first: */
result->merged_file
? result->merged_file
- : result_target,
+ : cdesc->merged_file,
result_pool, scratch_pool));
}
@@ -1809,13 +1783,13 @@ resolve_text_conflict(svn_skel_t **work_
db, local_abspath,
result->choice,
merge_options,
- left_abspath,
- right_abspath,
+ cdesc->base_abspath,
+ cdesc->their_abspath,
/* ### Sure this is an abspath? */
result->merged_file
? result->merged_file
- : result_target,
- detranslated_target,
+ : cdesc->merged_file,
+ cdesc->my_abspath,
result_pool, scratch_pool));
*work_items = svn_wc__wq_merge(*work_items, work_item, result_pool);
}
@@ -1826,19 +1800,74 @@ resolve_text_conflict(svn_skel_t **work_
}
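
(The rewritten comment above focuses on what the caller-supplied callback decides, so a minimal sketch of such a callback may help. The function below is an example of an svn_wc_conflict_resolver_func2_t, the type passed as CONFLICT_FUNC; picking "theirs" for text conflicts is an arbitrary illustrative policy, not something this change introduces.)

    #include "svn_wc.h"

    /* Example resolver callback: accept the incoming version for text
       conflicts, postpone everything else.  Illustrative only. */
    static svn_error_t *
    example_resolver_func(svn_wc_conflict_result_t **result,
                          const svn_wc_conflict_description2_t *description,
                          void *baton,
                          apr_pool_t *result_pool,
                          apr_pool_t *scratch_pool)
    {
      svn_wc_conflict_choice_t choice
        = (description->kind == svn_wc_conflict_kind_text)
            ? svn_wc_conflict_choose_theirs_full
            : svn_wc_conflict_choose_postpone;

      *result = svn_wc_create_conflict_result(choice, NULL /* merged_file */,
                                              result_pool);
      return SVN_NO_ERROR;
    }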
+/* Set *DESC to a new description of the text conflict in
+ * CONFLICT_SKEL. If there is no text conflict in CONFLICT_SKEL, return
+ * an error.
+ *
+ * Use OPERATION and shallow copies of LEFT_VERSION and RIGHT_VERSION,
+ * rather than reading them from CONFLICT_SKEL. Use IS_BINARY and
+ * MIME_TYPE for the corresponding fields of *DESC.
+ *
+ * Allocate results in RESULT_POOL. SCRATCH_POOL is used for temporary
+ * allocations. */
static svn_error_t *
-setup_tree_conflict_desc(svn_wc_conflict_description2_t **desc,
- svn_wc__db_t *db,
- const char *local_abspath,
- svn_wc_operation_t operation,
- const svn_wc_conflict_version_t *left_version,
- const svn_wc_conflict_version_t *right_version,
- svn_wc_conflict_reason_t local_change,
- svn_wc_conflict_action_t incoming_change,
- apr_pool_t *result_pool,
- apr_pool_t *scratch_pool)
+read_text_conflict_desc(svn_wc_conflict_description2_t **desc,
+ svn_wc__db_t *db,
+ const char *local_abspath,
+ const svn_skel_t *conflict_skel,
+ svn_boolean_t is_binary,
+ const char *mime_type,
+ svn_wc_operation_t operation,
+ const svn_wc_conflict_version_t *left_version,
+ const svn_wc_conflict_version_t *right_version,
+ apr_pool_t *result_pool,
+ apr_pool_t *scratch_pool)
+{
+ *desc = svn_wc_conflict_description_create_text2(local_abspath, result_pool);
+ (*desc)->is_binary = is_binary;
+ (*desc)->mime_type = mime_type;
+ (*desc)->operation = operation;
+ (*desc)->src_left_version = left_version;
+ (*desc)->src_right_version = right_version;
+
+ SVN_ERR(svn_wc__conflict_read_text_conflict(&(*desc)->my_abspath,
+ &(*desc)->base_abspath,
+ &(*desc)->their_abspath,
+ db, local_abspath,
+ conflict_skel,
+ result_pool, scratch_pool));
+ (*desc)->merged_file = apr_pstrdup(result_pool, local_abspath);
+
+ return SVN_NO_ERROR;
+}
+
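
(As a follow-on to the helper above: the description it builds is an ordinary svn_wc_conflict_description2_t, so a consumer simply reads the fields the helper fills in. The dumper below is hypothetical and exists only to show which fields carry the 'mine', 'base', 'theirs' and merged paths.)

    #include <stdio.h>
    #include "svn_wc.h"

    /* Hypothetical consumer of a description built by
       read_text_conflict_desc().  Fields may be NULL when the corresponding
       artifact does not exist. */
    static void
    example_dump_text_conflict(const svn_wc_conflict_description2_t *desc)
    {
      printf("text conflict on %s (mime-type: %s)\n",
             desc->local_abspath,
             desc->mime_type ? desc->mime_type : "(unknown)");
      printf("  mine:   %s\n", desc->my_abspath ? desc->my_abspath : "(none)");
      printf("  base:   %s\n", desc->base_abspath ? desc->base_abspath : "(none)");
      printf("  theirs: %s\n", desc->their_abspath ? desc->their_abspath : "(none)");
      printf("  merged: %s\n", desc->merged_file ? desc->merged_file : "(none)");
    }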
+/* Set *DESC to a new description of the tree conflict in
+ * CONFLICT_SKEL. If there is no tree conflict in CONFLICT_SKEL, return
+ * an error.
+ *
+ * Use OPERATION and shallow copies of LEFT_VERSION and RIGHT_VERSION,
+ * rather than reading them from CONFLICT_SKEL.
+ *
+ * Allocate results in RESULT_POOL. SCRATCH_POOL is used for temporary
+ * allocations. */
+static svn_error_t *
+read_tree_conflict_desc(svn_wc_conflict_description2_t **desc,
+ svn_wc__db_t *db,
+ const char *local_abspath,
+ const svn_skel_t *conflict_skel,
+ svn_wc_operation_t operation,
+ const svn_wc_conflict_version_t *left_version,
+ const svn_wc_conflict_version_t *right_version,
+ apr_pool_t *result_pool,
+ apr_pool_t *scratch_pool)
{
svn_node_kind_t tc_kind;
+ svn_wc_conflict_reason_t local_change;
+ svn_wc_conflict_action_t incoming_change;
+
+ SVN_ERR(svn_wc__conflict_read_tree_conflict(
+ &local_change, &incoming_change, NULL,
+ db, local_abspath, conflict_skel, scratch_pool, scratch_pool));
if (left_version)
tc_kind = left_version->node_kind;
@@ -1966,30 +1995,29 @@ svn_wc__conflict_invoke_resolver(svn_wc_
SVN_ERR(svn_wc__mark_resolved_prop_conflicts(db, local_abspath,
scratch_pool));
}
+ svn_pool_destroy(iterpool);
}
if (text_conflicted)
{
- const char *mine_abspath;
- const char *their_original_abspath;
- const char *their_abspath;
svn_skel_t *work_items;
svn_boolean_t was_resolved;
+ svn_wc_conflict_description2_t *desc;
+ apr_hash_t *props;
- SVN_ERR(svn_wc__conflict_read_text_conflict(&mine_abspath,
- &their_original_abspath,
- &their_abspath,
- db, local_abspath,
- conflict_skel,
- scratch_pool, scratch_pool));
+ SVN_ERR(svn_wc__db_read_props(&props, db, local_abspath,
+ scratch_pool, scratch_pool));
+
+ SVN_ERR(read_text_conflict_desc(&desc,
+ db, local_abspath, conflict_skel, FALSE,
+ svn_prop_get_value(props,
+ SVN_PROP_MIME_TYPE),
+ operation, left_version, right_version,
+ scratch_pool, scratch_pool));
SVN_ERR(resolve_text_conflict(&work_items, &was_resolved,
- db, local_abspath,
+ db, local_abspath, desc,
merge_options,
- operation,
- their_original_abspath, their_abspath,
- left_version, right_version,
- local_abspath, mine_abspath,
resolver_func, resolver_baton,
scratch_pool, scratch_pool));
@@ -2010,23 +2038,13 @@ svn_wc__conflict_invoke_resolver(svn_wc_
if (tree_conflicted)
{
- svn_wc_conflict_reason_t local_change;
- svn_wc_conflict_action_t incoming_change;
svn_wc_conflict_result_t *result;
svn_wc_conflict_description2_t *desc;
- SVN_ERR(svn_wc__conflict_read_tree_conflict(&local_change,
- &incoming_change,
- NULL,
- db, local_abspath,
- conflict_skel,
- scratch_pool, scratch_pool));
-
- SVN_ERR(setup_tree_conflict_desc(&desc,
- db, local_abspath,
- operation, left_version, right_version,
- local_change, incoming_change,
- scratch_pool, scratch_pool));
+ SVN_ERR(read_tree_conflict_desc(&desc,
+ db, local_abspath, conflict_skel,
+ operation, left_version, right_version,
+ scratch_pool, scratch_pool));
/* Tell the resolver func about this conflict. */
SVN_ERR(resolver_func(&result, desc, resolver_baton, scratch_pool,
@@ -2042,24 +2060,29 @@ svn_wc__conflict_invoke_resolver(svn_wc_
/* Read all property conflicts contained in CONFLICT_SKEL into
* individual conflict descriptions, and append those descriptions
- * to the CONFLICTS array.
+ * to the CONFLICTS array. If there is no property conflict in
+ * CONFLICT_SKEL, return an error.
*
* If NOT create_tempfiles, always create a legacy property conflict
* descriptor.
*
+ * Use NODE_KIND, OPERATION and shallow copies of LEFT_VERSION and
+ * RIGHT_VERSION, rather than reading them from CONFLICT_SKEL.
+ *
* Allocate results in RESULT_POOL. SCRATCH_POOL is used for temporary
* allocations. */
static svn_error_t *
-read_prop_conflicts(apr_array_header_t *conflicts,
- svn_wc__db_t *db,
- const char *local_abspath,
- svn_skel_t *conflict_skel,
- svn_boolean_t create_tempfiles,
- svn_wc_operation_t operation,
- const svn_wc_conflict_version_t *left_version,
- const svn_wc_conflict_version_t *right_version,
- apr_pool_t *result_pool,
- apr_pool_t *scratch_pool)
+read_prop_conflict_descs(apr_array_header_t *conflicts,
+ svn_wc__db_t *db,
+ const char *local_abspath,
+ svn_skel_t *conflict_skel,
+ svn_boolean_t create_tempfiles,
+ svn_node_kind_t node_kind,
+ svn_wc_operation_t operation,
+ const svn_wc_conflict_version_t *left_version,
+ const svn_wc_conflict_version_t *right_version,
+ apr_pool_t *result_pool,
+ apr_pool_t *scratch_pool)
{
const char *prop_reject_file;
apr_hash_t *my_props;
@@ -2084,7 +2107,7 @@ read_prop_conflicts(apr_array_header_t *
svn_wc_conflict_description2_t *desc;
desc = svn_wc_conflict_description_create_prop2(local_abspath,
- svn_node_unknown,
+ node_kind,
"", result_pool);
/* ### This should be changed. The prej file should be stored
@@ -2115,7 +2138,7 @@ read_prop_conflicts(apr_array_header_t *
svn_pool_clear(iterpool);
desc = svn_wc_conflict_description_create_prop2(local_abspath,
- svn_node_unknown,
+ node_kind,
propname,
result_pool);
@@ -2246,51 +2269,37 @@ svn_wc__read_conflicts(const apr_array_h
right_version = APR_ARRAY_IDX(locations, 1, const svn_wc_conflict_version_t *);
if (prop_conflicted)
- SVN_ERR(read_prop_conflicts(cflcts, db, local_abspath, conflict_skel,
- create_tempfiles,
- operation, left_version, right_version,
- result_pool, scratch_pool));
+ {
+ svn_node_kind_t node_kind
+ = left_version ? left_version->node_kind : svn_node_unknown;
+
+ SVN_ERR(read_prop_conflict_descs(cflcts,
+ db, local_abspath, conflict_skel,
+ create_tempfiles, node_kind,
+ operation, left_version, right_version,
+ result_pool, scratch_pool));
+ }
if (text_conflicted)
{
svn_wc_conflict_description2_t *desc;
- desc = svn_wc_conflict_description_create_text2(local_abspath,
- result_pool);
-
- desc->operation = operation;
- desc->src_left_version = left_version;
- desc->src_right_version = right_version;
-
- SVN_ERR(svn_wc__conflict_read_text_conflict(&desc->my_abspath,
- &desc->base_abspath,
- &desc->their_abspath,
- db, local_abspath,
- conflict_skel,
- result_pool, scratch_pool));
-
- desc->merged_file = apr_pstrdup(result_pool, local_abspath);
+ SVN_ERR(read_text_conflict_desc(&desc,
+ db, local_abspath, conflict_skel,
+ FALSE /*is_binary*/, NULL /*mime_type*/,
+ operation, left_version, right_version,
+ result_pool, scratch_pool));
APR_ARRAY_PUSH(cflcts, svn_wc_conflict_description2_t*) = desc;
}
if (tree_conflicted)
{
- svn_wc_conflict_reason_t local_change;
- svn_wc_conflict_action_t incoming_change;
svn_wc_conflict_description2_t *desc;
- SVN_ERR(svn_wc__conflict_read_tree_conflict(&local_change,
- &incoming_change,
- NULL,
- db, local_abspath,
- conflict_skel,
- scratch_pool, scratch_pool));
-
- SVN_ERR(setup_tree_conflict_desc(&desc,
- db, local_abspath,
- operation, left_version, right_version,
- local_change, incoming_change,
- result_pool, scratch_pool));
+ SVN_ERR(read_tree_conflict_desc(&desc,
+ db, local_abspath, conflict_skel,
+ operation, left_version, right_version,
+ result_pool, scratch_pool));
APR_ARRAY_PUSH(cflcts, const svn_wc_conflict_description2_t *) = desc;
}
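
(Each of the three branches above appends an svn_wc_conflict_description2_t to the CFLCTS array, so a caller of svn_wc__read_conflicts() typically walks that array and dispatches on the description kind. The loop below is only an illustration; the per-kind handling is left as placeholder comments.)

    #include <apr_tables.h>
    #include "svn_wc.h"

    /* Sketch: walk an array of conflict descriptions as produced above. */
    static void
    example_walk_conflicts(const apr_array_header_t *conflicts)
    {
      int i;

      for (i = 0; i < conflicts->nelts; i++)
        {
          const svn_wc_conflict_description2_t *desc
            = APR_ARRAY_IDX(conflicts, i, const svn_wc_conflict_description2_t *);

          switch (desc->kind)
            {
            case svn_wc_conflict_kind_text:
              /* e.g. hand DESC to a text-conflict resolver */
              break;
            case svn_wc_conflict_kind_property:
              /* DESC->property_name names the conflicted property */
              break;
            case svn_wc_conflict_kind_tree:
              /* DESC->reason and DESC->action describe the tree conflict */
              break;
            }
        }
    }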
@@ -2302,6 +2311,46 @@ svn_wc__read_conflicts(const apr_array_h
/*** Resolving a conflict automatically ***/
+/* Prepare to delete an artifact file at ARTIFACT_FILE_ABSPATH in the
+ * working copy at DB/WRI_ABSPATH.
+ *
+ * Set *WORK_ITEMS to a new work item that, when run, will delete the
+ * artifact file; or to NULL if there is no file to delete.
+ *
+ * Set *FILE_FOUND to TRUE if the artifact file is found on disk and its
+ * node kind is 'file'; otherwise do not change *FILE_FOUND. FILE_FOUND
+ * may be NULL if not required.
+ */
+static svn_error_t *
+remove_artifact_file_if_exists(svn_skel_t **work_items,
+ svn_boolean_t *file_found,
+ svn_wc__db_t *db,
+ const char *wri_abspath,
+ const char *artifact_file_abspath,
+ apr_pool_t *result_pool,
+ apr_pool_t *scratch_pool)
+{
+ *work_items = NULL;
+ if (artifact_file_abspath)
+ {
+ svn_node_kind_t node_kind;
+
+ SVN_ERR(svn_io_check_path(artifact_file_abspath, &node_kind,
+ scratch_pool));
+ if (node_kind == svn_node_file)
+ {
+ SVN_ERR(svn_wc__wq_build_file_remove(work_items,
+ db, wri_abspath,
+ artifact_file_abspath,
+ result_pool, scratch_pool));
+ if (file_found)
+ *file_found = TRUE;
+ }
+ }
+
+ return SVN_NO_ERROR;
+}
+
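
(The helper above deliberately leaves *FILE_FOUND untouched when nothing is removed, so a single flag can be threaded through several calls and ends up TRUE if any artifact was deleted. The wrapper below condenses the calling pattern used further down in this file; the function name is hypothetical, and the unconditional svn_wc__wq_merge() calls rely on it tolerating a NULL work item, as the real call sites appear to assume.)

    /* Hypothetical wrapper showing the calling pattern: one call per artifact
       path, each (possibly NULL) work item merged into *WORK_ITEMS. */
    static svn_error_t *
    example_remove_text_artifacts(svn_skel_t **work_items,
                                  svn_boolean_t *removed_reject_files,
                                  svn_wc__db_t *db,
                                  const char *local_abspath,
                                  const char *conflict_old,
                                  const char *conflict_new,
                                  apr_pool_t *scratch_pool)
    {
      svn_skel_t *work_item;

      SVN_ERR(remove_artifact_file_if_exists(&work_item, removed_reject_files,
                                             db, local_abspath, conflict_old,
                                             scratch_pool, scratch_pool));
      *work_items = svn_wc__wq_merge(*work_items, work_item, scratch_pool);

      SVN_ERR(remove_artifact_file_if_exists(&work_item, removed_reject_files,
                                             db, local_abspath, conflict_new,
                                             scratch_pool, scratch_pool));
      *work_items = svn_wc__wq_merge(*work_items, work_item, scratch_pool);

      return SVN_NO_ERROR;
    }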
/*
* Resolve the text conflict found in DB/LOCAL_ABSPATH/CONFLICTS
* according to CONFLICT_CHOICE. (Don't mark it as resolved.)
@@ -2332,7 +2381,6 @@ resolve_text_conflict_on_node(svn_boolea
const char *conflict_new = NULL;
const char *conflict_working = NULL;
const char *auto_resolve_src;
- svn_node_kind_t node_kind;
svn_skel_t *work_item;
SVN_ERR(svn_wc__conflict_read_text_conflict(&conflict_working,
@@ -2425,47 +2473,20 @@ resolve_text_conflict_on_node(svn_boolea
If not the UI shows the conflict as already resolved
(and in this case we just remove the in-db conflict) */
- if (conflict_old)
- {
- SVN_ERR(svn_io_check_path(conflict_old, &node_kind, scratch_pool));
- if (node_kind == svn_node_file)
- {
- SVN_ERR(svn_wc__wq_build_file_remove(&work_item, db,
- local_abspath,
- conflict_old,
- scratch_pool, scratch_pool));
- *work_items = svn_wc__wq_merge(*work_items, work_item, scratch_pool);
- *removed_reject_files = TRUE;
- }
- }
-
- if (conflict_new)
- {
- SVN_ERR(svn_io_check_path(conflict_new, &node_kind, scratch_pool));
- if (node_kind == svn_node_file)
- {
- SVN_ERR(svn_wc__wq_build_file_remove(&work_item, db,
- local_abspath,
- conflict_new,
- scratch_pool, scratch_pool));
- *work_items = svn_wc__wq_merge(*work_items, work_item, scratch_pool);
- *removed_reject_files = TRUE;
- }
- }
-
- if (conflict_working)
- {
- SVN_ERR(svn_io_check_path(conflict_working, &node_kind, scratch_pool));
- if (node_kind == svn_node_file)
- {
- SVN_ERR(svn_wc__wq_build_file_remove(&work_item, db,
- local_abspath,
- conflict_working,
- scratch_pool, scratch_pool));
- *work_items = svn_wc__wq_merge(*work_items, work_item, scratch_pool);
- *removed_reject_files = TRUE;
- }
- }
+ SVN_ERR(remove_artifact_file_if_exists(&work_item, removed_reject_files,
+ db, local_abspath, conflict_old,
+ scratch_pool, scratch_pool));
+ *work_items = svn_wc__wq_merge(*work_items, work_item, scratch_pool);
+
+ SVN_ERR(remove_artifact_file_if_exists(&work_item, removed_reject_files,
+ db, local_abspath, conflict_new,
+ scratch_pool, scratch_pool));
+ *work_items = svn_wc__wq_merge(*work_items, work_item, scratch_pool);
+
+ SVN_ERR(remove_artifact_file_if_exists(&work_item, removed_reject_files,
+ db, local_abspath, conflict_working,
+ scratch_pool, scratch_pool));
+ *work_items = svn_wc__wq_merge(*work_items, work_item, scratch_pool);
return SVN_NO_ERROR;
}
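
(The selection of the copy source for a given CONFLICT_CHOICE happens in the part of resolve_text_conflict_on_node() that this hunk does not show. The mapping sketched below follows Subversion's usual naming, old = base, new = theirs, working = mine, and should be read as an assumption about that elided code, not as a statement of it.)

    /* Assumed mapping from a conflict choice to the artifact file that would
       be used as the auto-resolve source; illustrative only. */
    static const char *
    example_pick_auto_resolve_src(svn_wc_conflict_choice_t choice,
                                  const char *conflict_old,
                                  const char *conflict_new,
                                  const char *conflict_working)
    {
      switch (choice)
        {
        case svn_wc_conflict_choose_base:
          return conflict_old;
        case svn_wc_conflict_choose_theirs_full:
          return conflict_new;
        case svn_wc_conflict_choose_mine_full:
          return conflict_working;
        default:
          return NULL;   /* e.g. postpone: nothing to copy */
        }
    }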
@@ -2518,7 +2539,6 @@ resolve_prop_conflict_on_node(svn_boolea
svn_wc_conflict_choice_t conflict_choice,
apr_pool_t *scratch_pool)
{
- svn_node_kind_t node_kind;
const char *prop_reject_file;
apr_hash_t *mine_props;
apr_hash_t *their_old_props;
@@ -2601,21 +2621,14 @@ resolve_prop_conflict_on_node(svn_boolea
If not the UI shows the conflict as already resolved
(and in this case we just remove the in-db conflict) */
- if (prop_reject_file)
- {
- SVN_ERR(svn_io_check_path(prop_reject_file, &node_kind, scratch_pool));
- if (node_kind == svn_node_file)
- {
- svn_skel_t *work_item;
+ {
+ svn_skel_t *work_item;
- SVN_ERR(svn_wc__wq_build_file_remove(&work_item, db,
- local_abspath,
- prop_reject_file,
- scratch_pool, scratch_pool));
- *work_items = svn_wc__wq_merge(*work_items, work_item, scratch_pool);
- *removed_reject_file = TRUE;
- }
- }
+ SVN_ERR(remove_artifact_file_if_exists(&work_item, removed_reject_file,
+ db, local_abspath, prop_reject_file,
+ scratch_pool, scratch_pool));
+ *work_items = svn_wc__wq_merge(*work_items, work_item, scratch_pool);
+ }
return SVN_NO_ERROR;
}