Posted to commits@apr.apache.org by yl...@apache.org on 2023/02/14 11:18:00 UTC

svn commit: r1907642 - /apr/apr/trunk/configure.in

Author: ylavic
Date: Tue Feb 14 11:18:00 2023
New Revision: 1907642

URL: http://svn.apache.org/viewvc?rev=1907642&view=rev
Log:
configure: Test apr_uint64_t alignment for 64bit atomic builtins usability.

On some systems the __atomic builtins may be available only through libatomic,
or may fall back to libatomic when the atomic operations are not issued on a
suitably aligned address (for instance, 64bit atomics that are lock-free only
on 8-byte aligned addresses).
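
For illustration only (not part of this commit), a standalone program such as
the one below shows the linking side of the problem: when the compiler cannot
prove that the operand of a 64bit __atomic operation is 8-byte aligned, it may
emit an out-of-line call into libatomic instead of an inline instruction, and
the link then fails without -latomic. The struct layout is just an assumed way
to obtain an under-aligned 64bit object; names are illustrative.

/* Illustration only: on some 32bit targets this may not link without
 * -latomic, because the compiler emits a libatomic call for the 64bit
 * fetch-add when it cannot prove 8-byte alignment of &s.val. */
#include <stdint.h>

struct unaligned {
    char pad;            /* pushes val off a "natural" 8-byte boundary */
    uint64_t val;        /* may be only 4-byte aligned on ILP32 ABIs   */
};

int main(void)
{
    struct unaligned s = { 0, 1010 };
    uint64_t old;

    /* lowered either to an inline atomic instruction or to a call into
     * libatomic (e.g. an out-of-line __atomic_fetch_add_8), depending on
     * the alignment the compiler can prove for &s.val */
    old = __atomic_fetch_add(&s.val, 1010, __ATOMIC_SEQ_CST);

    return (old == 1010 && s.val == 2020) ? 0 : 1;
}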

Modify the tests for HAVE_ATOMIC_BUILTINS64 and HAVE__ATOMIC_BUILTINS64 such
that the address used for the atomic operations is not necessarily 8-byte
aligned (unless 64bit integers always have that alignment, i.e. mainly on
64bit systems).
Also, use the __atomic_always_lock_free() builtin to fail the test when the
compiler already knows about the alignment issue (and would fall back to
libatomic, which we don't require/want).
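
As a side note (not from the commit), the sketch below shows why the test
passes the actual address to __atomic_always_lock_free(): the builtin folds to
a compile-time constant based on the alignment the compiler can prove for that
pointer, so an under-aligned 64bit object yields 0 (libatomic fallback) while
an explicitly 8-byte aligned one yields 1 wherever lock-free 64bit atomics
exist at all. Struct and variable names are illustrative only.

/* Illustrative sketch: the pointer argument lets the compiler take the
 * provable alignment of the object into account. */
#include <stdint.h>
#include <stdio.h>

struct padded {
    char pad;
    uint64_t val;                             /* ABI alignment, maybe only 4 */
};

struct aligned {
    uint64_t val __attribute__((aligned(8))); /* forced 8-byte alignment */
};

int main(void)
{
    struct padded p;
    struct aligned a;

    /* may print 0 on 32bit targets: the compiler would go through libatomic */
    printf("padded : %d\n", (int)__atomic_always_lock_free(sizeof(p.val), &p.val));
    /* prints 1 wherever lock-free 64bit atomics are available at all */
    printf("aligned: %d\n", (int)__atomic_always_lock_free(sizeof(a.val), &a.val));

    return 0;
}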

With this, the 64bit builtins should be selected only on platforms that can
natively handle atomics on any apr_uint64_t (since APR has no dedicated
8-byte aligned 64bit type for now), while the generic/mutex implementation
is used on the others.
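
To make "generic/mutex implementation" concrete, here is a conceptual sketch
(not APR's actual code) of a mutex-protected 64bit fetch-add: correctness no
longer depends on the operand's alignment, only on every accessor taking the
same lock. Function and variable names are hypothetical.

/* Conceptual sketch only (not APR's actual code): a generic, mutex-protected
 * 64bit fetch-add for platforms without usable lock-free 64bit atomics. */
#include <stdint.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static uint64_t generic_fetch_add64(volatile uint64_t *mem, uint64_t val)
{
    uint64_t old;

    pthread_mutex_lock(&lock);
    old = *mem;             /* read and update under the lock, so any */
    *mem = old + val;       /* alignment of *mem is acceptable        */
    pthread_mutex_unlock(&lock);

    return old;
}

int main(void)
{
    uint64_t v = 1010;
    return (generic_fetch_add64(&v, 1010) == 1010 && v == 2020) ? 0 : 1;
}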


Modified:
    apr/apr/trunk/configure.in

Modified: apr/apr/trunk/configure.in
URL: http://svn.apache.org/viewvc/apr/apr/trunk/configure.in?rev=1907642&r1=1907641&r2=1907642&view=diff
==============================================================================
--- apr/apr/trunk/configure.in (original)
+++ apr/apr/trunk/configure.in Tue Feb 14 11:18:00 2023
@@ -572,31 +572,35 @@ AC_CACHE_CHECK([whether the compiler pro
 [AC_TRY_RUN([
 #if HAVE_STDINT_H
 #include <stdint.h>
+typedef uint64_t u64_t;
+#else
+typedef unsigned long long u64_t;
 #endif
 int main(int argc, const char *const *argv)
 {
-#if HAVE_STDINT_H
-    uint64_t val = 1010, tmp, *mem = &val;
-#else
-    unsigned long long val = 1010, tmp, *mem = &val;
-#endif
+    struct {
+        char pad0;
+        u64_t val;
+    } s;
+    u64_t *mem = &s.val, tmp;
 
-    if (__sync_fetch_and_add(&val, 1010) != 1010 || val != 2020)
+    s.val = 1010;
+    if (__sync_fetch_and_add(&s.val, 1010) != 1010 || s.val != 2020)
         return 1;
 
-    tmp = val;
-    if (__sync_fetch_and_sub(mem, 1010) != tmp || val != 1010)
+    tmp = s.val;
+    if (__sync_fetch_and_sub(mem, 1010) != tmp || s.val != 1010)
         return 1;
 
-    if (__sync_sub_and_fetch(&val, 1010) != 0 || val != 0)
+    if (__sync_sub_and_fetch(&s.val, 1010) != 0 || s.val != 0)
         return 1;
 
     tmp = 3030;
-    if (__sync_val_compare_and_swap(mem, 0, tmp) != 0 || val != tmp)
+    if (__sync_val_compare_and_swap(mem, 0, tmp) != 0 || s.val != tmp)
         return 1;
 
     __sync_synchronize();
-    if (__sync_lock_test_and_set(&val, 4040) != 3030)
+    if (__sync_lock_test_and_set(&s.val, 4040) != 3030)
         return 1;
 
     return 0;
@@ -606,31 +610,45 @@ AC_CACHE_CHECK([whether the compiler pro
 [AC_TRY_RUN([
 #if HAVE_STDINT_H
 #include <stdint.h>
+typedef uint64_t u64_t;
+#else
+typedef unsigned long long u64_t;
 #endif
+static int test_always_lock_free(volatile u64_t *val)
+{
+    return __atomic_always_lock_free(sizeof(*val), val);
+}
 int main(int argc, const char *const *argv)
 {
-#if HAVE_STDINT_H
-    uint64_t val = 1010, tmp, *mem = &val;
-#else
-    unsigned long long val = 1010, tmp, *mem = &val;
-#endif
+    struct {
+        char pad0;
+        u64_t val;
+        char pad1;
+        u64_t tmp;
+    } s;
+    u64_t *mem = &s.val;
+
+    /* check if alignment matters (no fallback to libatomic) */
+    if (!test_always_lock_free(&s.val))
+        return 1;
 
-    if (__atomic_fetch_add(&val, 1010, __ATOMIC_SEQ_CST) != 1010 || val != 2020)
+    s.val = 1010;
+    if (__atomic_fetch_add(&s.val, 1010, __ATOMIC_SEQ_CST) != 1010 || s.val != 2020)
         return 1;
 
-    tmp = val;
-    if (__atomic_fetch_sub(mem, 1010, __ATOMIC_SEQ_CST) != tmp || val != 1010)
+    s.tmp = s.val;
+    if (__atomic_fetch_sub(mem, 1010, __ATOMIC_SEQ_CST) != s.tmp || s.val != 1010)
         return 1;
 
-    if (__atomic_sub_fetch(&val, 1010, __ATOMIC_SEQ_CST) != 0 || val != 0)
+    if (__atomic_sub_fetch(&s.val, 1010, __ATOMIC_SEQ_CST) != 0 || s.val != 0)
         return 1;
 
-    tmp = val;
-    if (!__atomic_compare_exchange_n(mem, &tmp, 3030, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
-            || tmp != 0)
+    s.tmp = s.val;
+    if (!__atomic_compare_exchange_n(mem, &s.tmp, 3030, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+            || s.tmp != 0)
         return 1;
 
-    if (__atomic_exchange_n(&val, 4040, __ATOMIC_SEQ_CST) != 3030)
+    if (__atomic_exchange_n(&s.val, 4040, __ATOMIC_SEQ_CST) != 3030)
         return 1;
 
     return 0;