Posted to commits@subversion.apache.org by ju...@apache.org on 2011/10/13 10:13:16 UTC

svn commit: r1182711 [2/2] - in /subversion/branches/tree-read-api: ./ subversion/include/ subversion/include/private/ subversion/libsvn_client/ subversion/libsvn_fs/ subversion/libsvn_fs_base/bdb/ subversion/libsvn_fs_fs/ subversion/libsvn_ra_svn/ sub...

Modified: subversion/branches/tree-read-api/subversion/libsvn_subr/cache-membuffer.c
URL: http://svn.apache.org/viewvc/subversion/branches/tree-read-api/subversion/libsvn_subr/cache-membuffer.c?rev=1182711&r1=1182710&r2=1182711&view=diff
==============================================================================
--- subversion/branches/tree-read-api/subversion/libsvn_subr/cache-membuffer.c (original)
+++ subversion/branches/tree-read-api/subversion/libsvn_subr/cache-membuffer.c Thu Oct 13 08:13:15 2011
@@ -30,6 +30,7 @@
 #include "cache.h"
 #include "svn_string.h"
 #include "private/svn_dep_compat.h"
+#include "private/svn_mutex.h"
 
 /*
  * This svn_cache__t implementation actually consists of two parts:
@@ -423,13 +424,11 @@ struct svn_membuffer_t
    */
   apr_uint64_t total_hits;
 
-#if APR_HAS_THREADS
   /* A lock for intra-process synchronization to the cache, or NULL if
    * the cache's creator doesn't feel the cache needs to be
    * thread-safe.
    */
-  apr_thread_mutex_t *mutex;
-#endif
+  svn_mutex__t *mutex;
 };
 
 /* Align integer VALUE to the next ITEM_ALIGNMENT boundary.
@@ -440,43 +439,6 @@ struct svn_membuffer_t
  */
 #define ALIGN_POINTER(pointer) ((void*)ALIGN_VALUE((apr_size_t)(char*)(pointer)))
 
-/* Acquire the cache mutex, if necessary.
- */
-static svn_error_t *
-lock_cache(svn_membuffer_t *cache)
-{
-#if APR_HAS_THREADS
-  if (cache->mutex)
-  {
-    apr_status_t status = apr_thread_mutex_lock(cache->mutex);
-    if (status)
-      return svn_error_wrap_apr(status, _("Can't lock cache mutex"));
-  }
-#endif
-
-  return SVN_NO_ERROR;
-}
-
-/* Release the cache mutex, if necessary.
- */
-static svn_error_t *
-unlock_cache(svn_membuffer_t *cache, svn_error_t *err)
-{
-#if APR_HAS_THREADS
-  if (cache->mutex)
-  {
-    apr_status_t status = apr_thread_mutex_unlock(cache->mutex);
-    if (err)
-      return err;
-
-    if (status)
-      return svn_error_wrap_apr(status, _("Can't unlock cache mutex"));
-  }
-#endif
-
-  return err;
-}
-
 /* Resolve a dictionary entry reference, i.e. return the entry
  * for the given IDX.
  */
@@ -713,7 +675,7 @@ initialize_group(svn_membuffer_t *cache,
 static entry_t *
 find_entry(svn_membuffer_t *cache,
            apr_uint32_t group_index,
-           unsigned char *to_find,
+           const unsigned char *to_find,
            svn_boolean_t find_empty)
 {
   entry_t *group;
@@ -1076,25 +1038,10 @@ svn_cache__membuffer_cache_create(svn_me
           return svn_error_wrap_apr(APR_ENOMEM, _("OOM"));
         }
 
-#if APR_HAS_THREADS
       /* A lock for intra-process synchronization to the cache, or NULL if
        * the cache's creator doesn't feel the cache needs to be
        * thread-safe. */
-
-      c[seg].mutex = NULL;
-      if (thread_safe)
-        {
-          apr_status_t status =
-              apr_thread_mutex_create(&(c[seg].mutex),
-                                      APR_THREAD_MUTEX_DEFAULT,
-                                      pool);
-          if (status)
-            return svn_error_wrap_apr(status, _("Can't create cache mutex"));
-        }
-#else
-      if (thread_safe)
-        return svn_error_wrap_apr(APR_ENOTIMPL, _("APR doesn't support threads"));
-#endif
+      SVN_ERR(svn_mutex__init(&c[seg].mutex, thread_safe, pool));
     }
 
   /* done here
@@ -1104,48 +1051,30 @@ svn_cache__membuffer_cache_create(svn_me
 }
 
 
-/* Try to insert the ITEM and use the KEY to unqiuely identify it.
+/* Try to insert the serialized item given in BUFFER with SIZE into
+ * the group GROUP_INDEX of CACHE and uniquely identify it by hash 
+ * value TO_FIND. 
+ * 
  * However, there is no guarantee that it will actually be put into
- * the cache. If there is already some data associated to the KEY,
+ * the cache. If there is already some data associated with TO_FIND,
  * it will be removed from the cache even if the new data cannot
  * be inserted.
- *
- * The SERIALIZER is called to transform the ITEM into a single,
- * flat data buffer. Temporary allocations may be done in POOL.
+ * 
+ * Note: This function requires the caller to serialize access.
+ * Don't call it directly, call membuffer_cache_set instead.
  */
 static svn_error_t *
-membuffer_cache_set(svn_membuffer_t *cache,
-                    const void *key,
-                    apr_size_t key_len,
-                    void *item,
-                    svn_cache__serialize_func_t serializer,
-                    DEBUG_CACHE_MEMBUFFER_TAG_ARG
-                    apr_pool_t *scratch_pool)
+membuffer_cache_set_internal(svn_membuffer_t *cache,
+                             const unsigned char *to_find,
+                             apr_uint32_t group_index,
+                             char *buffer,
+                             apr_size_t size,
+                             DEBUG_CACHE_MEMBUFFER_TAG_ARG
+                             apr_pool_t *scratch_pool)
 {
-  apr_uint32_t group_index;
-  unsigned char to_find[KEY_SIZE];
-  entry_t *entry;
-  char *buffer;
-  apr_size_t size;
-
-  /* find the entry group that will hold the key.
-   */
-  group_index = get_group_index(&cache, key, key_len, to_find, scratch_pool);
-  if (group_index == NO_INDEX)
-    return SVN_NO_ERROR;
-
-  /* Serialize data data.
-   */
-  if (item)
-    SVN_ERR(serializer(&buffer, &size, item, scratch_pool));
-
-  /* The actual cache data access needs to sync'ed
-   */
-  SVN_ERR(lock_cache(cache));
-
   /* if necessary, enlarge the insertion window.
    */
-  if (   item != NULL
+  if (   buffer != NULL
       && cache->data_size / 4 > size
       && ensure_data_insertable(cache, size))
     {
@@ -1153,7 +1082,7 @@ membuffer_cache_set(svn_membuffer_t *cac
        * Get an unused entry for the key and initialize it with
        * the serialized item's (future) position within the data buffer.
        */
-      entry = find_entry(cache, group_index, to_find, TRUE);
+      entry_t *entry = find_entry(cache, group_index, to_find, TRUE);
       entry->size = size;
       entry->offset = cache->current_data;
 
@@ -1182,44 +1111,76 @@ membuffer_cache_set(svn_membuffer_t *cac
        */
       find_entry(cache, group_index, to_find, TRUE);
     }
-
-  /* done here -> unlock the cache
-   */
-  return unlock_cache(cache, SVN_NO_ERROR);
+  return SVN_NO_ERROR;
 }
 
-/* Look for the *ITEM identified by KEY. If no item has been stored
- * for KEY, *ITEM will be NULL. Otherwise, the DESERIALIZER is called
- * re-construct the proper object from the serialized data.
- * Allocations will be done in POOL.
+/* Try to insert the ITEM and use the KEY to uniquely identify it.
+ * However, there is no guarantee that it will actually be put into
+ * the cache. If there is already some data associated with the KEY,
+ * it will be removed from the cache even if the new data cannot
+ * be inserted.
+ *
+ * The SERIALIZER is called to transform the ITEM into a single,
+ * flat data buffer. Temporary allocations may be done in POOL.
  */
 static svn_error_t *
-membuffer_cache_get(svn_membuffer_t *cache,
+membuffer_cache_set(svn_membuffer_t *cache,
                     const void *key,
                     apr_size_t key_len,
-                    void **item,
-                    svn_cache__deserialize_func_t deserializer,
+                    void *item,
+                    svn_cache__serialize_func_t serializer,
                     DEBUG_CACHE_MEMBUFFER_TAG_ARG
-                    apr_pool_t *result_pool)
+                    apr_pool_t *scratch_pool)
 {
   apr_uint32_t group_index;
   unsigned char to_find[KEY_SIZE];
-  entry_t *entry;
-  char *buffer;
+  char *buffer = NULL;
   apr_size_t size;
 
   /* find the entry group that will hold the key.
    */
-  group_index = get_group_index(&cache, key, key_len, to_find, result_pool);
+  group_index = get_group_index(&cache, key, key_len, to_find, scratch_pool);
   if (group_index == NO_INDEX)
-    {
-      /* Some error occured, return "item not found".
-       */
-      *item = NULL;
-      return SVN_NO_ERROR;
-    }
+    return SVN_NO_ERROR;
+
+  /* Serialize the data.
+   */
+  if (item)
+    SVN_ERR(serializer(&buffer, &size, item, scratch_pool));
 
-  SVN_ERR(lock_cache(cache));
+  /* The actual cache data access needs to be sync'ed
+   */
+  SVN_MUTEX__WITH_LOCK(cache->mutex,
+                       membuffer_cache_set_internal(cache,
+                                                    to_find,
+                                                    group_index,
+                                                    buffer,
+                                                    size,
+                                                    DEBUG_CACHE_MEMBUFFER_TAG
+                                                    scratch_pool));
+  return SVN_NO_ERROR;
+}
+
+/* Look for the cache entry in group GROUP_INDEX of CACHE, identified
+ * by the hash value TO_FIND. If no item has been stored for TO_FIND,
+ * *BUFFER will be NULL. Otherwise, return a copy of the serialized
+ * data in *BUFFER and return its size in *ITEM_SIZE. Allocations will 
+ * be done in POOL.
+ * 
+ * Note: This function requires the caller to serialize access.
+ * Don't call it directly, call membuffer_cache_get instead.
+ */
+static svn_error_t *
+membuffer_cache_get_internal(svn_membuffer_t *cache,
+                             apr_uint32_t group_index,
+                             const unsigned char *to_find,
+                             char **buffer,
+                             apr_size_t *item_size,
+                             DEBUG_CACHE_MEMBUFFER_TAG_ARG
+                             apr_pool_t *result_pool)
+{
+  entry_t *entry;
+  apr_size_t size;
 
   /* The actual cache data access needs to sync'ed
    */
@@ -1229,13 +1190,15 @@ membuffer_cache_get(svn_membuffer_t *cac
     {
       /* no such entry found.
        */
-      *item = NULL;
-      return unlock_cache(cache, SVN_NO_ERROR);
+      *buffer = NULL;
+      *item_size = 0;
+  
+      return SVN_NO_ERROR;
     }
-
+    
   size = ALIGN_VALUE(entry->size);
-  buffer = ALIGN_POINTER(apr_palloc(result_pool, size + ITEM_ALIGNMENT-1));
-  memcpy(buffer, (const char*)cache->data + entry->offset, size);
+  *buffer = ALIGN_POINTER(apr_palloc(result_pool, size + ITEM_ALIGNMENT-1));
+  memcpy(*buffer, (const char*)cache->data + entry->offset, size);
 
 #ifdef SVN_DEBUG_CACHE_MEMBUFFER
 
@@ -1258,45 +1221,91 @@ membuffer_cache_get(svn_membuffer_t *cac
   cache->hit_count++;
   cache->total_hits++;
 
-  SVN_ERR(unlock_cache(cache, SVN_NO_ERROR));
-
-  /* re-construct the original data object from its serialized form.
-   */
-  return deserializer(item, buffer, entry->size, result_pool);
+  *item_size = entry->size;
+  
+  return SVN_NO_ERROR;
 }
 
-/* Look for the cache entry identified by KEY and KEY_LEN. FOUND indicates
- * whether that entry exists. If not found, *ITEM will be NULL. Otherwise,
- * the DESERIALIZER is called with that entry and the BATON provided
- * and will extract the desired information. The result is set in *ITEM.
+/* Look for the *ITEM identified by KEY. If no item has been stored
+ * for KEY, *ITEM will be NULL. Otherwise, the DESERIALIZER is called
+ * to re-construct the proper object from the serialized data.
  * Allocations will be done in POOL.
  */
 static svn_error_t *
-membuffer_cache_get_partial(svn_membuffer_t *cache,
-                            const void *key,
-                            apr_size_t key_len,
-                            void **item,
-                            svn_boolean_t *found,
-                            svn_cache__partial_getter_func_t deserializer,
-                            void *baton,
-                            DEBUG_CACHE_MEMBUFFER_TAG_ARG
-                            apr_pool_t *result_pool)
+membuffer_cache_get(svn_membuffer_t *cache,
+                    const void *key,
+                    apr_size_t key_len,
+                    void **item,
+                    svn_cache__deserialize_func_t deserializer,
+                    DEBUG_CACHE_MEMBUFFER_TAG_ARG
+                    apr_pool_t *result_pool)
 {
   apr_uint32_t group_index;
   unsigned char to_find[KEY_SIZE];
-  entry_t *entry;
-  svn_error_t *err = SVN_NO_ERROR;
+  char *buffer;
+  apr_size_t size;
 
+  /* find the entry group that will hold the key.
+   */
   group_index = get_group_index(&cache, key, key_len, to_find, result_pool);
+  if (group_index == NO_INDEX)
+    {
+      /* Some error occurred, return "item not found".
+       */
+      *item = NULL;
+      return SVN_NO_ERROR;
+    }
 
-  SVN_ERR(lock_cache(cache));
+  SVN_MUTEX__WITH_LOCK(cache->mutex,
+                       membuffer_cache_get_internal(cache,
+                                                    group_index,
+                                                    to_find,
+                                                    &buffer,
+                                                    &size,
+                                                    DEBUG_CACHE_MEMBUFFER_TAG
+                                                    result_pool));
 
-  entry = find_entry(cache, group_index, to_find, FALSE);
+  /* re-construct the original data object from its serialized form.
+   */
+  if (buffer == NULL)
+    {
+      *item = NULL;
+      return SVN_NO_ERROR;
+    }
+    
+  return deserializer(item, buffer, size, result_pool);
+}
+
+/* Look for the cache entry in group GROUP_INDEX of CACHE, identified
+ * by the hash value TO_FIND. FOUND indicates whether that entry exists.
+ * If not found, *ITEM will be NULL.
+ * 
+ * Otherwise, the DESERIALIZER is called with that entry and the BATON 
+ * provided and will extract the desired information. The result is set
+ * in *ITEM. Allocations will be done in POOL.
+ * 
+ * Note: This function requires the caller to serialize access.
+ * Don't call it directly, call membuffer_cache_get_partial instead.
+ */
+static svn_error_t *
+membuffer_cache_get_partial_internal(svn_membuffer_t *cache,
+                                     apr_uint32_t group_index,
+                                     const unsigned char *to_find,
+                                     void **item,
+                                     svn_boolean_t *found,
+                                     svn_cache__partial_getter_func_t deserializer,
+                                     void *baton,
+                                     DEBUG_CACHE_MEMBUFFER_TAG_ARG
+                                     apr_pool_t *result_pool)
+{
+  entry_t *entry = find_entry(cache, group_index, to_find, FALSE);
   cache->total_reads++;
   if (entry == NULL)
     {
       *item = NULL;
       *found = FALSE;
+      
+      return SVN_NO_ERROR;
     }
   else
     {
@@ -1324,50 +1333,75 @@ membuffer_cache_get_partial(svn_membuffe
 
 #endif
 
-      err = deserializer(item,
-                         (const char*)cache->data + entry->offset,
-                         entry->size,
-                         baton,
-                         result_pool);
+      return deserializer(item,
+                          (const char*)cache->data + entry->offset,
+                          entry->size,
+                          baton,
+                          result_pool);
     }
-
-  /* done here -> unlock the cache
-   */
-  return unlock_cache(cache, err);
 }
 
-/* Look for the cache entry identified by KEY and KEY_LEN. If no entry
- * has been found, the function returns without modifying the cache.
- * Otherwise, FUNC is called with that entry and the BATON provided
- * and may modify the cache entry. Allocations will be done in POOL.
+/* Look for the cache entry identified by KEY and KEY_LEN. FOUND indicates
+ * whether that entry exists. If not found, *ITEM will be NULL. Otherwise,
+ * the DESERIALIZER is called with that entry and the BATON provided
+ * and will extract the desired information. The result is set in *ITEM.
+ * Allocations will be done in POOL.
  */
 static svn_error_t *
-membuffer_cache_set_partial(svn_membuffer_t *cache,
+membuffer_cache_get_partial(svn_membuffer_t *cache,
                             const void *key,
                             apr_size_t key_len,
-                            svn_cache__partial_setter_func_t func,
+                            void **item,
+                            svn_boolean_t *found,
+                            svn_cache__partial_getter_func_t deserializer,
                             void *baton,
                             DEBUG_CACHE_MEMBUFFER_TAG_ARG
-                            apr_pool_t *scratch_pool)
+                            apr_pool_t *result_pool)
 {
   apr_uint32_t group_index;
   unsigned char to_find[KEY_SIZE];
-  entry_t *entry;
-  svn_error_t *err = SVN_NO_ERROR;
 
-  /* cache item lookup
-   */
-  group_index = get_group_index(&cache, key, key_len, to_find, scratch_pool);
+  group_index = get_group_index(&cache, key, key_len, to_find, result_pool);
 
-  SVN_ERR(lock_cache(cache));
+  SVN_MUTEX__WITH_LOCK(cache->mutex,
+                       membuffer_cache_get_partial_internal
+                           (cache, group_index, to_find, item, found,
+                            deserializer, baton, DEBUG_CACHE_MEMBUFFER_TAG
+                            result_pool));
 
-  entry = find_entry(cache, group_index, to_find, FALSE);
+  return SVN_NO_ERROR;
+}
+
+/* Look for the cache entry in group GROUP_INDEX of CACHE, identified
+ * by the hash value TO_FIND. If no entry has been found, the function
+ * returns without modifying the cache.
+ * 
+ * Otherwise, FUNC is called with that entry and the BATON provided
+ * and may modify the cache entry. Allocations will be done in POOL.
+ * 
+ * Note: This function requires the caller to serialize access.
+ * Don't call it directly, call membuffer_cache_set_partial instead.
+ */
+static svn_error_t *
+membuffer_cache_set_partial_internal(svn_membuffer_t *cache,
+                                     apr_uint32_t group_index,
+                                     const unsigned char *to_find,
+                                     svn_cache__partial_setter_func_t func,
+                                     void *baton,
+                                     DEBUG_CACHE_MEMBUFFER_TAG_ARG
+                                     apr_pool_t *scratch_pool)
+{
+  /* cache item lookup
+   */
+  entry_t *entry = find_entry(cache, group_index, to_find, FALSE);
   cache->total_reads++;
 
   /* this function is a no-op if the item is not in cache
    */
   if (entry != NULL)
     {
+      svn_error_t *err;
+
       /* access the serialized cache item */
       char *data = (char*)cache->data + entry->offset;
       char *orig_data = data;
@@ -1441,9 +1475,39 @@ membuffer_cache_set_partial(svn_membuffe
         }
     }
 
+  return SVN_NO_ERROR;
+}
+
+/* Look for the cache entry identified by KEY and KEY_LEN. If no entry
+ * has been found, the function returns without modifying the cache.
+ * Otherwise, FUNC is called with that entry and the BATON provided
+ * and may modify the cache entry. Allocations will be done in POOL.
+ */
+static svn_error_t *
+membuffer_cache_set_partial(svn_membuffer_t *cache,
+                            const void *key,
+                            apr_size_t key_len,
+                            svn_cache__partial_setter_func_t func,
+                            void *baton,
+                            DEBUG_CACHE_MEMBUFFER_TAG_ARG
+                            apr_pool_t *scratch_pool)
+{
+  apr_uint32_t group_index;
+  unsigned char to_find[KEY_SIZE];
+
+  /* cache item lookup
+   */
+  group_index = get_group_index(&cache, key, key_len, to_find, scratch_pool);
+
+  SVN_MUTEX__WITH_LOCK(cache->mutex,
+                       membuffer_cache_set_partial_internal
+                           (cache, group_index, to_find, func, baton,
+                            DEBUG_CACHE_MEMBUFFER_TAG
+                            scratch_pool));
+
   /* done here -> unlock the cache
    */
-  return unlock_cache(cache, err);
+  return SVN_NO_ERROR;
 }
 
 /* Implement the svn_cache__t interface on top of a shared membuffer cache.
@@ -1652,6 +1716,8 @@ svn_membuffer_cache_iter(svn_boolean_t *
                           _("Can't iterate a membuffer-based cache"));
 }
 
+/* Implement svn_cache__vtable_t.get_partial
+ */
 static svn_error_t *
 svn_membuffer_cache_get_partial(void **value_p,
                                 svn_boolean_t *found,
@@ -1695,6 +1761,8 @@ svn_membuffer_cache_get_partial(void **v
   return SVN_NO_ERROR;
 }
 
+/* Implement svn_cache__vtable_t.set_partial
+ */
 static svn_error_t *
 svn_membuffer_cache_set_partial(void *cache_void,
                                 const void *key,
@@ -1728,6 +1796,8 @@ svn_membuffer_cache_set_partial(void *ca
   return SVN_NO_ERROR;
 }
 
+/* Implement svn_cache__vtable_t.is_cachable
+ */
 static svn_boolean_t
 svn_membuffer_cache_is_cachable(void *cache_void, apr_size_t size)
 {
@@ -1740,6 +1810,25 @@ svn_membuffer_cache_is_cachable(void *ca
       && (size < APR_UINT32_MAX - ITEM_ALIGNMENT);
 }
 
+/* Add statistics of SEGMENT to INFO.
+ */
+static svn_error_t *
+svn_membuffer_get_segment_info(svn_membuffer_t *segment,
+                               svn_cache__info_t *info)
+{
+  info->data_size += segment->data_size;
+  info->used_size += segment->data_used;
+  info->total_size += segment->data_size +
+      segment->group_count * GROUP_SIZE * sizeof(entry_t);
+
+  info->used_entries += segment->used_entries;
+  info->total_entries += segment->group_count * GROUP_SIZE;
+
+  return SVN_NO_ERROR;
+}
+
+/* Implement svn_cache__vtable_t.get_info
+ */
 static svn_error_t *
 svn_membuffer_cache_get_info(void *cache_void,
                              svn_cache__info_t *info,
@@ -1765,18 +1854,8 @@ svn_membuffer_cache_get_info(void *cache
   for (i = 0; i < cache->membuffer->segment_count; ++i)
     {
       svn_membuffer_t *segment = cache->membuffer + i;
-
-      SVN_ERR(lock_cache(segment));
-
-      info->data_size += segment->data_size;
-      info->used_size += segment->data_used;
-      info->total_size += segment->data_size +
-          segment->group_count * GROUP_SIZE * sizeof(entry_t);
-
-      info->used_entries += segment->used_entries;
-      info->total_entries += segment->group_count * GROUP_SIZE;
-
-      SVN_ERR(unlock_cache(segment, SVN_NO_ERROR));
+      SVN_MUTEX__WITH_LOCK(segment->mutex, 
+                           svn_membuffer_get_segment_info(segment, info));
     }
 
   return SVN_NO_ERROR;
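
The hunks above all follow the same pattern: the public entry point does
everything that does not need the lock (hashing the key, serializing the
item), then hands the guarded work to a *_internal helper through
SVN_MUTEX__WITH_LOCK. A minimal standalone sketch of that pattern, assuming
only svn_mutex__init() and SVN_MUTEX__WITH_LOCK() as used in this diff; the
counter type and functions are hypothetical:

    #include <apr_pools.h>
    #include "svn_error.h"
    #include "private/svn_mutex.h"

    typedef struct counter_t
    {
      svn_mutex__t *mutex;
      apr_uint64_t value;
    } counter_t;

    /* Guarded body.  Not thread-safe on its own; call it only through
     * counter_increment() below. */
    static svn_error_t *
    counter_increment_internal(counter_t *counter)
    {
      ++counter->value;
      return SVN_NO_ERROR;
    }

    static svn_error_t *
    counter_create(counter_t **counter,
                   svn_boolean_t thread_safe,
                   apr_pool_t *pool)
    {
      *counter = apr_pcalloc(pool, sizeof(**counter));

      /* With THREAD_SAFE == FALSE the mutex is a no-op, so single-threaded
       * callers pay nothing and the code below stays identical. */
      SVN_ERR(svn_mutex__init(&(*counter)->mutex, thread_safe, pool));

      return SVN_NO_ERROR;
    }

    static svn_error_t *
    counter_increment(counter_t *counter)
    {
      /* Acquire the mutex (if any), run the body, release the mutex and
       * propagate whichever error occurred. */
      SVN_MUTEX__WITH_LOCK(counter->mutex,
                           counter_increment_internal(counter));

      return SVN_NO_ERROR;
    }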

Modified: subversion/branches/tree-read-api/subversion/libsvn_subr/cmdline.c
URL: http://svn.apache.org/viewvc/subversion/branches/tree-read-api/subversion/libsvn_subr/cmdline.c?rev=1182711&r1=1182710&r2=1182711&view=diff
==============================================================================
--- subversion/branches/tree-read-api/subversion/libsvn_subr/cmdline.c (original)
+++ subversion/branches/tree-read-api/subversion/libsvn_subr/cmdline.c Thu Oct 13 08:13:15 2011
@@ -73,6 +73,7 @@ svn_cmdline_init(const char *progname, F
   apr_status_t status;
   apr_pool_t *pool;
   svn_error_t *err;
+  char prefix_buf[64];  /* 64 is probably bigger than most program names */
 
 #ifndef WIN32
   {
@@ -197,11 +198,17 @@ svn_cmdline_init(const char *progname, F
       return EXIT_FAILURE;
     }
 
-  /* This has to happen before any pools are created. */
+  strncpy(prefix_buf, progname, sizeof(prefix_buf) - 3);
+  prefix_buf[sizeof(prefix_buf) - 3] = '\0';
+  strcat(prefix_buf, ": ");
+
+  /* DSO pool must be created before any other pools used by the
+     application so that pool cleanup doesn't unload DSOs too
+     early. See docstring of svn_dso_initialize2(). */
   if ((err = svn_dso_initialize2()))
     {
-      if (error_stream && err->message)
-        fprintf(error_stream, "%s", err->message);
+      if (error_stream)
+        svn_handle_error2(err, error_stream, TRUE, prefix_buf);
 
       svn_error_clear(err);
       return EXIT_FAILURE;
@@ -223,8 +230,8 @@ svn_cmdline_init(const char *progname, F
 
   if ((err = svn_nls_init()))
     {
-      if (error_stream && err->message)
-        fprintf(error_stream, "%s", err->message);
+      if (error_stream)
+        svn_handle_error2(err, error_stream, TRUE, prefix_buf);
 
       svn_error_clear(err);
       return EXIT_FAILURE;

Modified: subversion/branches/tree-read-api/subversion/libsvn_subr/dso.c
URL: http://svn.apache.org/viewvc/subversion/branches/tree-read-api/subversion/libsvn_subr/dso.c?rev=1182711&r1=1182710&r2=1182711&view=diff
==============================================================================
--- subversion/branches/tree-read-api/subversion/libsvn_subr/dso.c (original)
+++ subversion/branches/tree-read-api/subversion/libsvn_subr/dso.c Thu Oct 13 08:13:15 2011
@@ -26,10 +26,10 @@
 #include "svn_pools.h"
 #include "svn_private_config.h"
 
+#include "private/svn_mutex.h"
+
 /* A mutex to protect our global pool and cache. */
-#if APR_HAS_THREADS
-static apr_thread_mutex_t *dso_mutex;
-#endif
+static svn_mutex__t *dso_mutex = NULL;
 
 /* Global pool to allocate DSOs in. */
 static apr_pool_t *dso_pool;
@@ -49,40 +49,21 @@ static int not_there_sentinel;
 svn_error_t *
 svn_dso_initialize2(void)
 {
-#if APR_HAS_THREADS
-  apr_status_t status;
-#endif
   if (dso_pool)
     return SVN_NO_ERROR;
 
   dso_pool = svn_pool_create(NULL);
 
-#if APR_HAS_THREADS
-  status = apr_thread_mutex_create(&dso_mutex,
-                                   APR_THREAD_MUTEX_DEFAULT, dso_pool);
-  if (status)
-    return svn_error_wrap_apr(status, _("Can't create DSO mutex"));
-#endif
+  SVN_ERR(svn_mutex__init(&dso_mutex, APR_HAS_THREADS, dso_pool));
 
   dso_cache = apr_hash_make(dso_pool);
   return SVN_NO_ERROR;
 }
 
 #if APR_HAS_DSO
-svn_error_t *
-svn_dso_load(apr_dso_handle_t **dso, const char *fname)
+static svn_error_t *
+svn_dso_load_internal(apr_dso_handle_t **dso, const char *fname)
 {
-  apr_status_t status;
-
-  if (! dso_pool)
-    SVN_ERR(svn_dso_initialize2());
-
-#if APR_HAS_THREADS
-  status = apr_thread_mutex_lock(dso_mutex);
-  if (status)
-    return svn_error_wrap_apr(status, _("Can't grab DSO mutex"));
-#endif
-
   *dso = apr_hash_get(dso_cache, fname, APR_HASH_KEY_STRING);
 
   /* First check to see if we've been through this before...  We do this
@@ -91,18 +72,13 @@ svn_dso_load(apr_dso_handle_t **dso, con
   if (*dso == NOT_THERE)
     {
       *dso = NULL;
-#if APR_HAS_THREADS
-      status = apr_thread_mutex_unlock(dso_mutex);
-      if (status)
-        return svn_error_wrap_apr(status, _("Can't ungrab DSO mutex"));
-#endif
       return SVN_NO_ERROR;
     }
 
   /* If we got nothing back from the cache, try and load the library. */
   if (! *dso)
     {
-      status = apr_dso_load(dso, fname, dso_pool);
+      apr_status_t status = apr_dso_load(dso, fname, dso_pool);
       if (status)
         {
 #ifdef SVN_DEBUG_DSO
@@ -120,11 +96,6 @@ svn_dso_load(apr_dso_handle_t **dso, con
                        APR_HASH_KEY_STRING,
                        NOT_THERE);
 
-#if APR_HAS_THREADS
-          status = apr_thread_mutex_unlock(dso_mutex);
-          if (status)
-            return svn_error_wrap_apr(status, _("Can't ungrab DSO mutex"));
-#endif
           return SVN_NO_ERROR;
         }
 
@@ -135,11 +106,16 @@ svn_dso_load(apr_dso_handle_t **dso, con
                    *dso);
     }
 
-#if APR_HAS_THREADS
-  status = apr_thread_mutex_unlock(dso_mutex);
-  if (status)
-    return svn_error_wrap_apr(status, _("Can't ungrab DSO mutex"));
-#endif
+  return SVN_NO_ERROR;
+}
+
+svn_error_t *
+svn_dso_load(apr_dso_handle_t **dso, const char *fname)
+{
+  if (! dso_pool)
+    SVN_ERR(svn_dso_initialize2());
+
+  SVN_MUTEX__WITH_LOCK(dso_mutex, svn_dso_load_internal(dso, fname));
 
   return SVN_NO_ERROR;
 }
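
The refactored svn_dso_load() keeps the NOT_THERE sentinel: a failed load is
recorded in the cache so later calls return immediately instead of retrying
the load. A minimal sketch of that negative-caching idiom, detached from the
DSO specifics; try_load() and the cache layout here are hypothetical, only
the apr_hash_*() and apr_pstrdup() calls are real APR API:

    #include <apr_hash.h>
    #include <apr_strings.h>

    /* Sentinel that marks "we tried this name before and it failed". */
    static int not_there_sentinel;
    #define NOT_THERE ((void *)&not_there_sentinel)

    /* Hypothetical loader: returns a handle or NULL on failure. */
    static void *
    try_load(const char *name, apr_pool_t *pool)
    {
      return NULL;
    }

    /* Return the handle cached under NAME, loading it on first use.
     * Returns NULL both for a fresh failure and for a previously recorded
     * one, but the expensive load is attempted only once per NAME. */
    static void *
    lookup_or_load(apr_hash_t *cache, const char *name, apr_pool_t *pool)
    {
      void *handle = apr_hash_get(cache, name, APR_HASH_KEY_STRING);

      if (handle == NOT_THERE)
        return NULL;                  /* known failure; don't retry */

      if (handle == NULL)
        {
          handle = try_load(name, pool);
          apr_hash_set(cache,
                       apr_pstrdup(apr_hash_pool_get(cache), name),
                       APR_HASH_KEY_STRING,
                       handle ? handle : NOT_THERE);
        }

      return handle;
    }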

Modified: subversion/branches/tree-read-api/subversion/libsvn_subr/utf.c
URL: http://svn.apache.org/viewvc/subversion/branches/tree-read-api/subversion/libsvn_subr/utf.c?rev=1182711&r1=1182710&r2=1182711&view=diff
==============================================================================
--- subversion/branches/tree-read-api/subversion/libsvn_subr/utf.c (original)
+++ subversion/branches/tree-read-api/subversion/libsvn_subr/utf.c Thu Oct 13 08:13:15 2011
@@ -42,6 +42,7 @@
 #include "private/svn_utf_private.h"
 #include "private/svn_dep_compat.h"
 #include "private/svn_string_private.h"
+#include "private/svn_mutex.h"
 
 
 
@@ -53,9 +54,7 @@ static const char *SVN_UTF_UTON_XLATE_HA
 
 static const char *SVN_APR_UTF8_CHARSET = "UTF-8";
 
-#if APR_HAS_THREADS
-static apr_thread_mutex_t *xlate_handle_mutex = NULL;
-#endif
+static svn_mutex__t *xlate_handle_mutex = NULL;
 
 /* The xlate handle cache is a global hash table with linked lists of xlate
  * handles.  In multi-threaded environments, a thread "borrows" an xlate
@@ -99,10 +98,6 @@ xlate_cleanup(void *arg)
 {
   /* We set the cache variables to NULL so that translation works in other
      cleanup functions, even if it isn't cached then. */
-#if APR_HAS_THREADS
-  apr_thread_mutex_destroy(xlate_handle_mutex);
-  xlate_handle_mutex = NULL;
-#endif
   xlate_handle_hash = NULL;
 
   /* ensure no stale objects get accessed */
@@ -125,27 +120,24 @@ xlate_handle_node_cleanup(void *arg)
 void
 svn_utf_initialize(apr_pool_t *pool)
 {
-  apr_pool_t *subpool;
-#if APR_HAS_THREADS
-  apr_thread_mutex_t *mutex;
-#endif
-
   if (!xlate_handle_hash)
     {
       /* We create our own subpool, which we protect with the mutex.
          We can't use the pool passed to us by the caller, since we will
          use it for xlate handle allocations, possibly in multiple threads,
          and pool allocation is not thread-safe. */
-      subpool = svn_pool_create(pool);
-#if APR_HAS_THREADS
-      if (apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, subpool)
-          == APR_SUCCESS)
-        xlate_handle_mutex = mutex;
-      else
-        return;
-#endif
+      apr_pool_t *subpool = svn_pool_create(pool);
+      svn_mutex__t *mutex;
+      svn_error_t *err = svn_mutex__init(&mutex, TRUE, subpool);
+      if (err)
+        {
+          svn_error_clear(err);
+          return;
+        }
 
+      xlate_handle_mutex = mutex;
       xlate_handle_hash = apr_hash_make(subpool);
+
       apr_pool_cleanup_register(subpool, NULL, xlate_cleanup,
                                 apr_pool_cleanup_null);
     }
@@ -202,91 +194,17 @@ atomic_swap(void * volatile * mem, void 
 #endif
 }
 
-/* Set *RET to a handle node for converting from FROMPAGE to TOPAGE,
-   creating the handle node if it doesn't exist in USERDATA_KEY.
-   If a node is not cached and apr_xlate_open() returns APR_EINVAL or
-   APR_ENOTIMPL, set (*RET)->handle to NULL.  If fail for any other
-   reason, return the error.
-
-   Allocate *RET and its xlate handle in POOL if svn_utf_initialize()
-   hasn't been called or USERDATA_KEY is NULL.  Else, allocate them
-   in the pool of xlate_handle_hash. */
+/* Set *RET to a newly created handle node for converting from FROMPAGE 
+   to TOPAGE.  If apr_xlate_open() returns APR_EINVAL or APR_ENOTIMPL, set
+   (*RET)->handle to NULL.  If it fails for any other reason, return the error.
+   Allocate *RET and its xlate handle in POOL. */
 static svn_error_t *
-get_xlate_handle_node(xlate_handle_node_t **ret,
-                      const char *topage, const char *frompage,
-                      const char *userdata_key, apr_pool_t *pool)
+xlate_alloc_handle(xlate_handle_node_t **ret,
+                   const char *topage, const char *frompage,
+                   apr_pool_t *pool)
 {
-  xlate_handle_node_t **old_node_p;
-  xlate_handle_node_t *old_node = NULL;
   apr_status_t apr_err;
   apr_xlate_t *handle;
-  svn_error_t *err = NULL;
-
-  /* If we already have a handle, just return it. */
-  if (userdata_key)
-    {
-      if (xlate_handle_hash)
-        {
-          /* 1st level: global, static items */
-          if (userdata_key == SVN_UTF_NTOU_XLATE_HANDLE)
-            old_node = atomic_swap(&xlat_ntou_static_handle, NULL);
-          else if (userdata_key == SVN_UTF_UTON_XLATE_HANDLE)
-            old_node = atomic_swap(&xlat_uton_static_handle, NULL);
-
-          if (old_node && old_node->valid)
-            {
-              *ret = old_node;
-              return SVN_NO_ERROR;
-            }
-
-          /* 2nd level: hash lookup */
-#if APR_HAS_THREADS
-          apr_err = apr_thread_mutex_lock(xlate_handle_mutex);
-          if (apr_err != APR_SUCCESS)
-            return svn_error_create(apr_err, NULL,
-                                    _("Can't lock charset translation mutex"));
-#endif
-          old_node_p = apr_hash_get(xlate_handle_hash, userdata_key,
-                                    APR_HASH_KEY_STRING);
-          if (old_node_p)
-            old_node = *old_node_p;
-          if (old_node)
-            {
-              /* Ensure that the handle is still valid. */
-              if (old_node->valid)
-                {
-                  /* Remove from the list. */
-                  *old_node_p = old_node->next;
-                  old_node->next = NULL;
-#if APR_HAS_THREADS
-                  apr_err = apr_thread_mutex_unlock(xlate_handle_mutex);
-                  if (apr_err != APR_SUCCESS)
-                    return svn_error_create(apr_err, NULL,
-                                            _("Can't unlock charset "
-                                              "translation mutex"));
-#endif
-                  *ret = old_node;
-                  return SVN_NO_ERROR;
-                }
-            }
-        }
-      else
-        {
-          void *p;
-          /* We fall back on a per-pool cache instead. */
-          apr_pool_userdata_get(&p, userdata_key, pool);
-          old_node = p;
-          /* Ensure that the handle is still valid. */
-          if (old_node && old_node->valid)
-            {
-              *ret = old_node;
-              return SVN_NO_ERROR;
-            }
-        }
-    }
-
-  /* Note that we still have the mutex locked (if it is initialized), so we
-     can use the global pool for creating the new xlate handle. */
 
   /* The error handling doesn't support the following cases, since we don't
      use them currently.  Catch this here. */
@@ -295,10 +213,6 @@ get_xlate_handle_node(xlate_handle_node_
                  && (frompage != SVN_APR_LOCALE_CHARSET
                      || topage != SVN_APR_LOCALE_CHARSET));
 
-  /* Use the correct pool for creating the handle. */
-  if (userdata_key && xlate_handle_hash)
-    pool = apr_hash_pool_get(xlate_handle_hash);
-
   /* Try to create a handle. */
 #if defined(WIN32)
   apr_err = svn_subr__win32_xlate_open((win32_xlate_t **)&handle, topage,
@@ -327,8 +241,7 @@ get_xlate_handle_node(xlate_handle_node_
                               _("Can't create a character converter from "
                                 "'%s' to '%s'"), frompage, topage);
 
-      err = svn_error_create(apr_err, NULL, errstr);
-      goto cleanup;
+      return svn_error_create(apr_err, NULL, errstr);
     }
 
   /* Allocate and initialize the node. */
@@ -350,76 +263,178 @@ get_xlate_handle_node(xlate_handle_node_
     apr_pool_cleanup_register(pool, *ret, xlate_handle_node_cleanup,
                               apr_pool_cleanup_null);
 
- cleanup:
-  /* Don't need the lock anymore. */
-#if APR_HAS_THREADS
+  return SVN_NO_ERROR;
+}
+
+/* Like xlate_alloc_handle(), but first try to reuse an existing handle
+   node from our global hash map, keyed on USERDATA_KEY, if one is available.
+   
+   Allocate *RET and its xlate handle in POOL if svn_utf_initialize()
+   hasn't been called or USERDATA_KEY is NULL.  Else, allocate them
+   in the pool of xlate_handle_hash.
+   
+   Note: this function is not thread-safe. Call get_xlate_handle_node
+   instead. */
+static svn_error_t *
+get_xlate_handle_node_internal(xlate_handle_node_t **ret,
+                               const char *topage, const char *frompage,
+                               const char *userdata_key, apr_pool_t *pool)
+{
+  /* If we already have a handle, just return it. */
   if (userdata_key && xlate_handle_hash)
     {
-      apr_status_t unlock_err = apr_thread_mutex_unlock(xlate_handle_mutex);
-      if (unlock_err != APR_SUCCESS)
-        return svn_error_create(unlock_err, NULL,
-                                _("Can't unlock charset translation mutex"));
+      xlate_handle_node_t *old_node = NULL;
+
+      /* 2nd level: hash lookup */
+      xlate_handle_node_t **old_node_p = apr_hash_get(xlate_handle_hash, 
+                                                      userdata_key,
+                                                      APR_HASH_KEY_STRING);
+      if (old_node_p)
+        old_node = *old_node_p;
+      if (old_node)
+        {
+          /* Ensure that the handle is still valid. */
+          if (old_node->valid)
+            {
+              /* Remove from the list. */
+              *old_node_p = old_node->next;
+              old_node->next = NULL;
+              *ret = old_node;
+              return SVN_NO_ERROR;
+            }
+        }
+    }
+
+  /* Note that we still have the mutex locked (if it is initialized), so we
+     can use the global pool for creating the new xlate handle. */
+
+  /* Use the correct pool for creating the handle. */
+  pool = apr_hash_pool_get(xlate_handle_hash);
+
+  return xlate_alloc_handle(ret, topage, frompage, pool);
+}
+
+/* Set *RET to a handle node for converting from FROMPAGE to TOPAGE,
+   creating the handle node if it doesn't exist in USERDATA_KEY.
+   If a node is not cached and apr_xlate_open() returns APR_EINVAL or
+   APR_ENOTIMPL, set (*RET)->handle to NULL.  If it fails for any other
+   reason, return the error.
+
+   Allocate *RET and its xlate handle in POOL if svn_utf_initialize()
+   hasn't been called or USERDATA_KEY is NULL.  Else, allocate them
+   in the pool of xlate_handle_hash. */
+static svn_error_t *
+get_xlate_handle_node(xlate_handle_node_t **ret,
+                      const char *topage, const char *frompage,
+                      const char *userdata_key, apr_pool_t *pool)
+{
+  xlate_handle_node_t *old_node = NULL;
+
+  /* If we already have a handle, just return it. */
+  if (userdata_key)
+    {
+      if (xlate_handle_hash)
+        {
+          /* 1st level: global, static items */
+          if (userdata_key == SVN_UTF_NTOU_XLATE_HANDLE)
+            old_node = atomic_swap(&xlat_ntou_static_handle, NULL);
+          else if (userdata_key == SVN_UTF_UTON_XLATE_HANDLE)
+            old_node = atomic_swap(&xlat_uton_static_handle, NULL);
+
+          if (old_node && old_node->valid)
+            {
+              *ret = old_node;
+              return SVN_NO_ERROR;
+            }
+        }
+      else
+        {
+          void *p;
+          /* We fall back on a per-pool cache instead. */
+          apr_pool_userdata_get(&p, userdata_key, pool);
+          old_node = p;
+          /* Ensure that the handle is still valid. */
+          if (old_node && old_node->valid)
+            {
+              *ret = old_node;
+              return SVN_NO_ERROR;
+            }
+
+          return xlate_alloc_handle(ret, topage, frompage, pool);
+        }
     }
-#endif
 
-  return err;
+  SVN_MUTEX__WITH_LOCK(xlate_handle_mutex,
+                       get_xlate_handle_node_internal(ret,
+                                                      topage,
+                                                      frompage,
+                                                      userdata_key,
+                                                      pool));
+
+  return SVN_NO_ERROR;
+}
+
+/* Put back NODE into the xlate handle cache for use by other calls.
+   
+   Note: this function is not thread-safe. Call put_xlate_handle_node
+   instead. */
+static svn_error_t *
+put_xlate_handle_node_internal(xlate_handle_node_t *node,
+                               const char *userdata_key)
+{
+  xlate_handle_node_t **node_p = apr_hash_get(xlate_handle_hash,
+                                              userdata_key,
+                                              APR_HASH_KEY_STRING);
+  if (node_p == NULL)
+    {
+      userdata_key = apr_pstrdup(apr_hash_pool_get(xlate_handle_hash),
+                                  userdata_key);
+      node_p = apr_palloc(apr_hash_pool_get(xlate_handle_hash),
+                          sizeof(*node_p));
+      *node_p = NULL;
+      apr_hash_set(xlate_handle_hash, userdata_key,
+                    APR_HASH_KEY_STRING, node_p);
+    }
+  node->next = *node_p;
+  *node_p = node;
+  
+  return SVN_NO_ERROR;
 }
 
 /* Put back NODE into the xlate handle cache for use by other calls.
    If there is no global cache, store the handle in POOL.
-   Ignore errors related to locking/unlocking the mutex.
-   ### Mutex errors here are very weird. Should we handle them "correctly"
-   ### even if that complicates error handling in the routines below? */
-static void
+   Any error from locking/unlocking the mutex is returned. */
+static svn_error_t *
 put_xlate_handle_node(xlate_handle_node_t *node,
                       const char *userdata_key,
                       apr_pool_t *pool)
 {
   assert(node->next == NULL);
   if (!userdata_key)
-    return;
+    return SVN_NO_ERROR;
 
   /* push previous global node to the hash */
   if (xlate_handle_hash)
     {
-      xlate_handle_node_t **node_p;
-
       /* 1st level: global, static items */
       if (userdata_key == SVN_UTF_NTOU_XLATE_HANDLE)
         node = atomic_swap(&xlat_ntou_static_handle, node);
       else if (userdata_key == SVN_UTF_UTON_XLATE_HANDLE)
         node = atomic_swap(&xlat_uton_static_handle, node);
       if (node == NULL)
-        return;
+        return SVN_NO_ERROR;
 
-#if APR_HAS_THREADS
-      if (apr_thread_mutex_lock(xlate_handle_mutex) != APR_SUCCESS)
-        SVN_ERR_MALFUNCTION_NO_RETURN();
-#endif
-      node_p = apr_hash_get(xlate_handle_hash, userdata_key,
-                            APR_HASH_KEY_STRING);
-      if (node_p == NULL)
-        {
-          userdata_key = apr_pstrdup(apr_hash_pool_get(xlate_handle_hash),
-                                     userdata_key);
-          node_p = apr_palloc(apr_hash_pool_get(xlate_handle_hash),
-                              sizeof(*node_p));
-          *node_p = NULL;
-          apr_hash_set(xlate_handle_hash, userdata_key,
-                       APR_HASH_KEY_STRING, node_p);
-        }
-      node->next = *node_p;
-      *node_p = node;
-#if APR_HAS_THREADS
-      if (apr_thread_mutex_unlock(xlate_handle_mutex) != APR_SUCCESS)
-        SVN_ERR_MALFUNCTION_NO_RETURN();
-#endif
+      SVN_MUTEX__WITH_LOCK(xlate_handle_mutex,
+                           put_xlate_handle_node_internal(node, 
+                                                          userdata_key));
     }
   else
     {
       /* Store it in the per-pool cache. */
       apr_pool_userdata_set(node, userdata_key, apr_pool_cleanup_null, pool);
     }
+    
+  return SVN_NO_ERROR;
 }
 
 /* Return the apr_xlate handle for converting native characters to UTF-8. */
@@ -720,9 +735,11 @@ svn_utf_stringbuf_to_utf8(svn_stringbuf_
         *dest = svn_stringbuf_dup(src, pool);
     }
 
-  put_xlate_handle_node(node, SVN_UTF_NTOU_XLATE_HANDLE, pool);
-
-  return err;
+  return svn_error_compose_create(err,
+                                  put_xlate_handle_node
+                                     (node, 
+                                      SVN_UTF_NTOU_XLATE_HANDLE,
+                                      pool));
 }
 
 
@@ -752,9 +769,11 @@ svn_utf_string_to_utf8(const svn_string_
         *dest = svn_string_dup(src, pool);
     }
 
-  put_xlate_handle_node(node, SVN_UTF_NTOU_XLATE_HANDLE, pool);
-
-  return err;
+  return svn_error_compose_create(err,
+                                  put_xlate_handle_node
+                                     (node, 
+                                      SVN_UTF_NTOU_XLATE_HANDLE,
+                                      pool));
 }
 
 
@@ -795,8 +814,11 @@ svn_utf_cstring_to_utf8(const char **des
 
   SVN_ERR(get_ntou_xlate_handle_node(&node, pool));
   err = convert_cstring(dest, src, node, pool);
-  put_xlate_handle_node(node, SVN_UTF_NTOU_XLATE_HANDLE, pool);
-  SVN_ERR(err);
+  SVN_ERR(svn_error_compose_create(err,
+                                   put_xlate_handle_node
+                                      (node, 
+                                       SVN_UTF_NTOU_XLATE_HANDLE,
+                                       pool)));
   return check_cstring_utf8(*dest, pool);
 }
 
@@ -815,8 +837,12 @@ svn_utf_cstring_to_utf8_ex2(const char *
   SVN_ERR(get_xlate_handle_node(&node, SVN_APR_UTF8_CHARSET, frompage,
                                 convset_key, pool));
   err = convert_cstring(dest, src, node, pool);
-  put_xlate_handle_node(node, convset_key, pool);
-  SVN_ERR(err);
+  SVN_ERR(svn_error_compose_create(err,
+                                   put_xlate_handle_node
+                                      (node, 
+                                       convset_key,
+                                       pool)));
+
   return check_cstring_utf8(*dest, pool);
 }
 
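
The svn_error_compose_create() calls above replace the old pattern of
putting the handle back first and only then checking ERR, which could drop
one of two errors. A minimal sketch of the composition pattern with
hypothetical do_work()/return_resource() helpers; svn_error_compose_create()
itself is the existing public API from svn_error.h:

    #include "svn_error.h"
    #include "svn_pools.h"

    /* Hypothetical primary operation; may fail. */
    static svn_error_t *
    do_work(apr_pool_t *pool)
    {
      return SVN_NO_ERROR;
    }

    /* Hypothetical cleanup step; may also fail. */
    static svn_error_t *
    return_resource(apr_pool_t *pool)
    {
      return SVN_NO_ERROR;
    }

    static svn_error_t *
    work_then_cleanup(apr_pool_t *pool)
    {
      svn_error_t *err = do_work(pool);

      /* Run the cleanup unconditionally.  If both calls fail, the cleanup
       * error is chained onto the primary error; if only one fails, that
       * one is returned unchanged; if neither fails, SVN_NO_ERROR. */
      return svn_error_compose_create(err, return_resource(pool));
    }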

Modified: subversion/branches/tree-read-api/subversion/libsvn_wc/merge.c
URL: http://svn.apache.org/viewvc/subversion/branches/tree-read-api/subversion/libsvn_wc/merge.c?rev=1182711&r1=1182710&r2=1182711&view=diff
==============================================================================
--- subversion/branches/tree-read-api/subversion/libsvn_wc/merge.c (original)
+++ subversion/branches/tree-read-api/subversion/libsvn_wc/merge.c Thu Oct 13 08:13:15 2011
@@ -928,8 +928,9 @@ maybe_resolve_conflicts(svn_skel_t **wor
 /* Attempt a trivial merge of LEFT_ABSPATH and RIGHT_ABSPATH to TARGET_ABSPATH.
  * The merge is trivial if the file at LEFT_ABSPATH equals TARGET_ABSPATH,
  * because in this case the content of RIGHT_ABSPATH can be copied to the
- * target. On success, set *MERGE_OUTCOME to SVN_WC_MERGE_MERGED,
- * and install work queue items allocated in RESULT_POOL in *WORK_ITEMS.
+ * target. On success, set *MERGE_OUTCOME to SVN_WC_MERGE_MERGED in case the
+ * target was changed, or to SVN_WC_MERGE_UNCHANGED if the target was not
+ * changed. Install work queue items allocated in RESULT_POOL in *WORK_ITEMS.
  * On failure, set *MERGE_OUTCOME to SVN_WC_MERGE_NO_MERGE. */
 static svn_error_t *
 merge_file_trivial(svn_skel_t **work_items,
@@ -944,40 +945,48 @@ merge_file_trivial(svn_skel_t **work_ite
 {
   svn_skel_t *work_item;
   svn_boolean_t same_contents = FALSE;
-  svn_error_t *err;
+  svn_node_kind_t kind;
+  svn_boolean_t is_special;
+
+  /* If the target is not a normal file, do not attempt a trivial merge. */
+  SVN_ERR(svn_io_check_special_path(target_abspath, &kind, &is_special,
+                                    scratch_pool));
+  if (kind != svn_node_file || is_special)
+    {
+      *merge_outcome = svn_wc_merge_no_merge;
+      return SVN_NO_ERROR;
+    }
 
   /* If the LEFT side of the merge is equal to WORKING, then we can
    * copy RIGHT directly. */
-  err = svn_io_files_contents_same_p(&same_contents, left_abspath,
-                                     target_abspath, scratch_pool);
-  if (err)
+  SVN_ERR(svn_io_files_contents_same_p(&same_contents, left_abspath,
+                                       target_abspath, scratch_pool));
+  if (same_contents)
     {
-      if (APR_STATUS_IS_ENOENT(err->apr_err))
+      /* Check whether the left side equals the right side.
+       * If it does, there is no change to merge so we leave the target
+       * unchanged. */
+      SVN_ERR(svn_io_files_contents_same_p(&same_contents, left_abspath,
+                                           right_abspath, scratch_pool));
+      if (same_contents)
         {
-          /* This can happen if TARGET_ABSPATH is a broken symlink.
-           * Let the smart merge code handle this. */
-          svn_error_clear(err);
-          *merge_outcome = svn_wc_merge_no_merge;
-          return SVN_NO_ERROR;
+          *merge_outcome = svn_wc_merge_unchanged;
         }
       else
-        return svn_error_trace(err);
-    }
-
-  if (same_contents)
-    {
-      if (!dry_run)
         {
-          SVN_ERR(svn_wc__wq_build_file_install(&work_item,
-                                                db, target_abspath,
-                                                right_abspath,
-                                                FALSE /* use_commit_times */,
-                                                FALSE /* record_fileinfo */,
-                                                result_pool, scratch_pool));
-          *work_items = svn_wc__wq_merge(*work_items, work_item, result_pool);
+          *merge_outcome = svn_wc_merge_merged;
+          if (!dry_run)
+            {
+              SVN_ERR(svn_wc__wq_build_file_install(
+                        &work_item, db, target_abspath, right_abspath,
+                        FALSE /* use_commit_times */,
+                        FALSE /* record_fileinfo */,
+                        result_pool, scratch_pool));
+              *work_items = svn_wc__wq_merge(*work_items, work_item,
+                                             result_pool);
+            }
         }
 
-      *merge_outcome = svn_wc_merge_merged;
       return SVN_NO_ERROR;
     }
 
@@ -1179,13 +1188,6 @@ merge_binary_file(svn_skel_t **work_item
 
   svn_dirent_split(&merge_dirpath, &merge_filename, mt->local_abspath, pool);
 
-  SVN_ERR(merge_file_trivial(work_items, merge_outcome,
-                             left_abspath, right_abspath,
-                             mt->local_abspath, dry_run, mt->db,
-                             result_pool, scratch_pool));
-  if (*merge_outcome == svn_wc_merge_merged)
-    return SVN_NO_ERROR;
-
   /* If we get here the binary files differ. Because we don't know how
    * to merge binary files in a non-trivial way we always flag a conflict. */
 
@@ -1414,41 +1416,48 @@ svn_wc__internal_merge(svn_skel_t **work
                                    cancel_func, cancel_baton,
                                    scratch_pool, scratch_pool));
 
-  if (is_binary)
-    {
-      SVN_ERR(merge_binary_file(work_items,
-                                merge_outcome,
-                                &mt,
-                                left_abspath,
-                                right_abspath,
-                                left_label,
-                                right_label,
-                                target_label,
-                                dry_run,
-                                left_version,
-                                right_version,
-                                detranslated_target_abspath,
-                                conflict_func,
-                                conflict_baton,
-                                result_pool, scratch_pool));
-    }
-  else
+  SVN_ERR(merge_file_trivial(work_items, merge_outcome,
+                             left_abspath, right_abspath,
+                             target_abspath, dry_run, db,
+                             result_pool, scratch_pool));
+  if (*merge_outcome == svn_wc_merge_no_merge)
     {
-      SVN_ERR(merge_text_file(work_items,
-                              merge_outcome,
-                              &mt,
-                              left_abspath,
-                              right_abspath,
-                              left_label,
-                              right_label,
-                              target_label,
-                              dry_run,
-                              left_version,
-                              right_version,
-                              detranslated_target_abspath,
-                              conflict_func, conflict_baton,
-                              cancel_func, cancel_baton,
-                              result_pool, scratch_pool));
+      if (is_binary)
+        {
+          SVN_ERR(merge_binary_file(work_items,
+                                    merge_outcome,
+                                    &mt,
+                                    left_abspath,
+                                    right_abspath,
+                                    left_label,
+                                    right_label,
+                                    target_label,
+                                    dry_run,
+                                    left_version,
+                                    right_version,
+                                    detranslated_target_abspath,
+                                    conflict_func,
+                                    conflict_baton,
+                                    result_pool, scratch_pool));
+        }
+      else
+        {
+          SVN_ERR(merge_text_file(work_items,
+                                  merge_outcome,
+                                  &mt,
+                                  left_abspath,
+                                  right_abspath,
+                                  left_label,
+                                  right_label,
+                                  target_label,
+                                  dry_run,
+                                  left_version,
+                                  right_version,
+                                  detranslated_target_abspath,
+                                  conflict_func, conflict_baton,
+                                  cancel_func, cancel_baton,
+                                  result_pool, scratch_pool));
+        }
     }
 
   /* Merging is complete.  Regardless of text or binariness, we might
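
With this change, svn_wc__internal_merge() always attempts the trivial merge
first and only falls back to the text or binary merge code when it reports
svn_wc_merge_no_merge. The decision merge_file_trivial() now makes can be
condensed as in the sketch below, with the file comparisons abstracted into
booleans; the real code derives them from svn_io_check_special_path() and
svn_io_files_contents_same_p():

    #include "svn_wc.h"

    static svn_wc_merge_outcome_t
    trivial_merge_outcome(svn_boolean_t target_is_regular_file,
                          svn_boolean_t left_equals_target,
                          svn_boolean_t left_equals_right)
    {
      /* Symlinks, directories and missing targets go to the full merge code. */
      if (! target_is_regular_file)
        return svn_wc_merge_no_merge;

      /* The target differs from LEFT (local mods): not a trivial case. */
      if (! left_equals_target)
        return svn_wc_merge_no_merge;

      /* LEFT == RIGHT: the incoming change is empty, leave the target alone. */
      if (left_equals_right)
        return svn_wc_merge_unchanged;

      /* Otherwise RIGHT can simply be installed over the target. */
      return svn_wc_merge_merged;
    }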

Modified: subversion/branches/tree-read-api/subversion/svnserve/cyrus_auth.c
URL: http://svn.apache.org/viewvc/subversion/branches/tree-read-api/subversion/svnserve/cyrus_auth.c?rev=1182711&r1=1182710&r2=1182711&view=diff
==============================================================================
--- subversion/branches/tree-read-api/subversion/svnserve/cyrus_auth.c (original)
+++ subversion/branches/tree-read-api/subversion/svnserve/cyrus_auth.c Thu Oct 13 08:13:15 2011
@@ -105,12 +105,7 @@ static sasl_callback_t callbacks[] =
 static svn_error_t *initialize(void *baton, apr_pool_t *pool)
 {
   int result;
-  apr_status_t status;
-
-  status = svn_ra_svn__sasl_common_init(pool);
-  if (status)
-    return svn_error_wrap_apr(status,
-                              _("Could not initialize the SASL library"));
+  SVN_ERR(svn_ra_svn__sasl_common_init(pool));
 
   /* The second parameter tells SASL to look for a configuration file
      named subversion.conf. */

Modified: subversion/branches/tree-read-api/subversion/tests/cmdline/log_tests.py
URL: http://svn.apache.org/viewvc/subversion/branches/tree-read-api/subversion/tests/cmdline/log_tests.py?rev=1182711&r1=1182710&r2=1182711&view=diff
==============================================================================
--- subversion/branches/tree-read-api/subversion/tests/cmdline/log_tests.py (original)
+++ subversion/branches/tree-read-api/subversion/tests/cmdline/log_tests.py Thu Oct 13 08:13:15 2011
@@ -2028,6 +2028,72 @@ def log_on_nonexistent_path_and_valid_re
   svntest.actions.run_and_verify_svn(None, None, expected_error,
                                      'log', '-q', bad_path_default_rev)
 
+#----------------------------------------------------------------------
+# Test for issue #4022 'svn log -g interprets change in inherited mergeinfo
+# due to move as a merge'.
+@Issue(4022)
+@XFail()
+def merge_sensitive_log_copied_path_inherited_mergeinfo(sbox):
+  "log -g on copied path with inherited mergeinfo"
+
+  sbox.build()
+  wc_dir = sbox.wc_dir
+  wc_disk, wc_status = set_up_branch(sbox, branch_only=True)
+
+  A_path          = os.path.join(wc_dir, 'A')
+  gamma_COPY_path = os.path.join(wc_dir, 'A_COPY', 'D', 'gamma')
+  old_gamma_path  = os.path.join(wc_dir, 'A', 'D', 'gamma')
+  new_gamma_path  = os.path.join(wc_dir, 'A', 'C', 'gamma')
+
+  # r3 - Modify a file (A_COPY/D/gamma) on the branch
+  svntest.main.file_write(gamma_COPY_path, "Branch edit.\n")
+  svntest.main.run_svn(None, 'ci', '-m', 'Branch edit', wc_dir)
+
+  # r4 - Reintegrate A_COPY to A
+  svntest.main.run_svn(None, 'up', wc_dir)
+  svntest.main.run_svn(None, 'merge', '--reintegrate',
+                       sbox.repo_url + '/A_COPY', A_path)
+  svntest.main.run_svn(None, 'ci', '-m', 'Reintegrate A_COPY to A', wc_dir)
+
+  # r5 - Move file modified by reintegrate (A/D/gamma to A/C/gamma).
+  svntest.main.run_svn(None, 'move', old_gamma_path, new_gamma_path)
+  svntest.main.run_svn(None, 'ci', '-m', 'Move file', wc_dir)
+
+  # 'svn log -g --stop-on-copy ^/A/C/gamma' should return *only* r5
+  # Currently this test fails because the change in gamma's inherited
+  # mergeinfo between r4 and r5, due to the move, is understood as a merge:
+  #
+  #   >svn log -v -g --stop-on-copy ^^/A/C/gamma
+  #   ------------------------------------------------------------------------
+  #   r5 | jrandom | 2011-10-11 14:37:57 -0700 (Tue, 11 Oct 2011) | 1 line
+  #   Changed paths:
+  #      A /A/C/gamma (from /A/D/gamma:4)
+  #      D /A/D/gamma
+  #
+  #   Move file
+  #   ------------------------------------------------------------------------
+  #   r3 | jrandom | 2011-10-11 14:37:56 -0700 (Tue, 11 Oct 2011) | 1 line
+  #   Changed paths:
+  #      M /A_COPY/D/gamma
+  #   Reverse merged via: r5
+  #
+  #   Branch edit
+  #   ------------------------------------------------------------------------
+  #   r2 | jrandom | 2011-10-11 14:37:56 -0700 (Tue, 11 Oct 2011) | 1 line
+  #   Changed paths:
+  #      A /A_COPY (from /A:1)
+  #   Reverse merged via: r5
+  #
+  #   log msg
+  #   ------------------------------------------------------------------------
+  expected_merges = {5  : []}
+  svntest.main.run_svn(None, 'up', wc_dir)
+  exit_code, out, err = svntest.actions.run_and_verify_svn(
+    None, None, [], 'log', '-g', '--stop-on-copy',
+    sbox.repo_url + '/A/C/gamma')
+  log_chain = parse_log_output(out)
+  check_merge_results(log_chain, expected_merges)
+
 ########################################################################
 # Run the tests
 
@@ -2067,6 +2133,7 @@ test_list = [ None,
               merge_sensitive_log_ignores_cyclic_merges,
               log_with_unrelated_peg_and_operative_revs,
               log_on_nonexistent_path_and_valid_rev,
+              merge_sensitive_log_copied_path_inherited_mergeinfo,
              ]
 
 if __name__ == '__main__':

Modified: subversion/branches/tree-read-api/subversion/tests/libsvn_subr/cache-test.c
URL: http://svn.apache.org/viewvc/subversion/branches/tree-read-api/subversion/tests/libsvn_subr/cache-test.c?rev=1182711&r1=1182710&r2=1182711&view=diff
==============================================================================
--- subversion/branches/tree-read-api/subversion/tests/libsvn_subr/cache-test.c (original)
+++ subversion/branches/tree-read-api/subversion/tests/libsvn_subr/cache-test.c Thu Oct 13 08:13:15 2011
@@ -135,7 +135,7 @@ test_inprocess_cache_basic(apr_pool_t *p
                                       APR_HASH_KEY_STRING,
                                       1,
                                       1,
-                                      TRUE,
+                                      APR_HAS_THREADS,
                                       "",
                                       pool));
 

Modified: subversion/branches/tree-read-api/tools/dist/templates/rc-news.ezt
URL: http://svn.apache.org/viewvc/subversion/branches/tree-read-api/tools/dist/templates/rc-news.ezt?rev=1182711&r1=1182710&r2=1182711&view=diff
==============================================================================
--- subversion/branches/tree-read-api/tools/dist/templates/rc-news.ezt (original)
+++ subversion/branches/tree-read-api/tools/dist/templates/rc-news.ezt Thu Oct 13 08:13:15 2011
@@ -4,7 +4,7 @@
     title="Link to this section">&para;</a> 
 </h3> 
  
-<p>We are pleased to announce to release of Apache Subversion [version].  This
+<p>We are pleased to announce the release of Apache Subversion [version].  This
    release is not intended for production use, but is provided as a milestone
    to encourage wider testing and feedback from intrepid users and maintainers.
    Please see the

Modified: subversion/branches/tree-read-api/tools/dist/templates/stable-news.ezt
URL: http://svn.apache.org/viewvc/subversion/branches/tree-read-api/tools/dist/templates/stable-news.ezt?rev=1182711&r1=1182710&r2=1182711&view=diff
==============================================================================
--- subversion/branches/tree-read-api/tools/dist/templates/stable-news.ezt (original)
+++ subversion/branches/tree-read-api/tools/dist/templates/stable-news.ezt Thu Oct 13 08:13:15 2011
@@ -4,13 +4,13 @@
     title="Link to this section">&para;</a> 
 </h3> 
  
-<p>We are pleased to announce to release of Apache Subversion [version].  This
-   is the most complete Subversion release to date, and we encourage users
-   of Subversion to upgrade as soon as reasonable.  Please see the
-   <a href="">release
-   announcement</a> for more information about this release, and the
-   <a href="http://svn.apache.org/repos/asf/subversion/tags/[version]/CHANGES"> 
-   change log</a> for information about this release.</p> 
+<p>We are pleased to announce the release of Apache Subversion [version].
+   This is the most complete Subversion release to date, and we encourage
+   users of Subversion to upgrade as soon as reasonable.  Please see the
+   <a href=""
+   >release announcement</a> and the
+   <a href="http://svn.apache.org/repos/asf/subversion/tags/[version]/CHANGES"
+   >change log</a> for more information about this release.</p> 
  
 <p>To get this release from the nearest mirror, please visit our
    <a href="/download/#recommended-release">download page</a>.</p>