Daniel proposed in [1] that we introduce apr_atomic_casptr2() to
resolve the defect in the apr_atomic_casptr() API.
Namely the correct:
1. APR_DECLARE(void*) apr_atomic_casptr(void *volatile *mem, ...);
Versus the broken:
2. APR_DECLARE(void*) apr_atomic_casptr(volatile void **mem, ...);
The problem with 2 is that it does not prevent the compiler from
caching *mem into a local variable, which would break atomicity.
This is fixed in trunk already where we could change the API to 1, but
not in 1.x.
Anyone opposed to the attached patch (for 1.8.x only then)?
Regards;
Yann.
[1] https://bz.apache.org/bugzilla/show_bug.cgi?id=50731#c2
Index: atomic/netware/apr_atomic.c
===================================================================
--- atomic/netware/apr_atomic.c (revision 1902233)
+++ atomic/netware/apr_atomic.c (working copy)
@@ -72,6 +72,11 @@ APR_DECLARE(void *) apr_atomic_casptr(volatile voi
return (void*)atomic_cmpxchg((unsigned long *)mem,(unsigned long)cmp,(unsigned long)with);
}
+APR_DECLARE(void *) apr_atomic_casptr2(void *volatile *mem, void *with, const void *cmp)
+{
+    return (void*)atomic_cmpxchg((unsigned long *)mem,(unsigned long)cmp,(unsigned long)with);
+}
+
APR_DECLARE(void*) apr_atomic_xchgptr(volatile void **mem, void *with)
{
return (void*)atomic_xchg((unsigned long *)mem,(unsigned long)with);
Index: atomic/os390/atomic.c
===================================================================
--- atomic/os390/atomic.c (revision 1902233)
+++ atomic/os390/atomic.c (working copy)
@@ -94,6 +94,15 @@ void *apr_atomic_casptr(volatile void **mem_ptr,
            &swap_ptr);
return (void *)cmp_ptr;
}
+void *apr_atomic_casptr2(void *volatile *mem_ptr,
+                         void *swap_ptr,
+                         const void *cmp_ptr)
+{
+     __cs1(&cmp_ptr,     /* automatically updated from mem on __cs1 failure  */
+           mem_ptr,      /* set from swap when __cs1 succeeds                */
+           &swap_ptr);
+     return (void *)cmp_ptr;
+}
#elif APR_SIZEOF_VOIDP == 8
void *apr_atomic_casptr(volatile void **mem_ptr,
void *swap_ptr,
@@ -104,6 +113,15 @@ void *apr_atomic_casptr(volatile void **mem_ptr,
            &swap_ptr);
return (void *)cmp_ptr;
}
+void *apr_atomic_casptr2(void *volatile *mem_ptr,
+                         void *swap_ptr,
+                         const void *cmp_ptr)
+{
+     __csg(&cmp_ptr,     /* automatically updated from mem on __csg failure  */
+           mem_ptr,      /* set from swap when __csg succeeds                */
+           &swap_ptr);
+     return (void *)cmp_ptr;
+}
#else
#error APR_SIZEOF_VOIDP value not supported
#endif /* APR_SIZEOF_VOIDP */
Index: atomic/unix/builtins.c
===================================================================
--- atomic/unix/builtins.c (revision 1902234)
+++ atomic/unix/builtins.c (working copy)
@@ -121,6 +121,16 @@ APR_DECLARE(void*) apr_atomic_casptr(volatile void
#endif
}
+APR_DECLARE(void*) apr_atomic_casptr2(void *volatile *mem, void *ptr, const void *cmp)
+{
+#if HAVE__ATOMIC_BUILTINS
+    __atomic_compare_exchange_n(mem, (void *)&cmp, ptr, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+    return (void *)cmp;
+#else
+    return (void *)__sync_val_compare_and_swap(mem, (void *)cmp, ptr);
+#endif
+}
+
APR_DECLARE(void*) apr_atomic_xchgptr(volatile void **mem, void *ptr)
{
#if HAVE__ATOMIC_BUILTINS
Index: atomic/unix/ia32.c
===================================================================
--- atomic/unix/ia32.c (revision 1902233)
+++ atomic/unix/ia32.c (working copy)
@@ -111,6 +111,24 @@ APR_DECLARE(void*) apr_atomic_casptr(volatile void
return prev;
}
+APR_DECLARE(void*) apr_atomic_casptr2(void *volatile *mem, void *with, const void *cmp)
+{
+    void *prev;
+#if APR_SIZEOF_VOIDP == 4
+    asm volatile ("lock; cmpxchgl %2, %1"
+                  : "=a" (prev), "=m" (*mem)
+                  : "r" (with), "m" (*mem), "0" (cmp));
+#elif APR_SIZEOF_VOIDP == 8
+    asm volatile ("lock; cmpxchgq %q2, %1"
+                  : "=a" (prev), "=m" (*mem)
+                  : "r" ((unsigned long)with), "m" (*mem),
+                    "0" ((unsigned long)cmp));
+#else
+#error APR_SIZEOF_VOIDP value not supported
+#endif
+    return prev;
+}
+
APR_DECLARE(void*) apr_atomic_xchgptr(volatile void **mem, void *with)
{
void *prev;
Index: atomic/unix/mutex.c
===================================================================
--- atomic/unix/mutex.c (revision 1902233)
+++ atomic/unix/mutex.c (working copy)
@@ -190,6 +190,21 @@ APR_DECLARE(void*) apr_atomic_casptr(volatile void
return prev;
}
+APR_DECLARE(void*) apr_atomic_casptr2(void *volatile *mem, void *with, const void *cmp)
+{
+    void *prev;
+    DECLARE_MUTEX_LOCKED(mutex, *mem);
+
+    prev = *(void **)mem;
+    if (prev == cmp) {
+        *mem = with;
+    }
+
+    MUTEX_UNLOCK(mutex);
+
+    return prev;
+}
+
APR_DECLARE(void*) apr_atomic_xchgptr(volatile void **mem, void *with)
{
void *prev;